-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c |  50
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c  |   2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  |   9
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  |  14
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c |  10
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  |  89
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c   | 910
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h   | 254
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c   |  27
9 files changed, 1315 insertions(+), 50 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index a31e05f3bfd4..df0002f78805 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -23,11 +23,23 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
23 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 23 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
24 struct device, kobj))); 24 struct device, kobj)));
25 struct qla_hw_data *ha = vha->hw; 25 struct qla_hw_data *ha = vha->hw;
26 int rval = 0;
26 27
27 if (ha->fw_dump_reading == 0) 28 if (ha->fw_dump_reading == 0)
28 return 0; 29 return 0;
29 30
30 return memory_read_from_buffer(buf, count, &off, ha->fw_dump, 31 if (IS_QLA82XX(ha)) {
32 if (off < ha->md_template_size) {
33 rval = memory_read_from_buffer(buf, count,
34 &off, ha->md_tmplt_hdr, ha->md_template_size);
35 return rval;
36 }
37 off -= ha->md_template_size;
38 rval = memory_read_from_buffer(buf, count,
39 &off, ha->md_dump, ha->md_dump_size);
40 return rval;
41 } else
42 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
31 ha->fw_dump_len); 43 ha->fw_dump_len);
32} 44}
33 45
@@ -41,12 +53,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
41 struct qla_hw_data *ha = vha->hw; 53 struct qla_hw_data *ha = vha->hw;
42 int reading; 54 int reading;
43 55
44 if (IS_QLA82XX(ha)) {
45 ql_dbg(ql_dbg_user, vha, 0x705b,
46 "Firmware dump not supported for ISP82xx\n");
47 return count;
48 }
49
50 if (off != 0) 56 if (off != 0)
51 return (0); 57 return (0);
52 58
@@ -59,6 +65,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
59 ql_log(ql_log_info, vha, 0x705d, 65 ql_log(ql_log_info, vha, 0x705d,
60 "Firmware dump cleared on (%ld).\n", vha->host_no); 66 "Firmware dump cleared on (%ld).\n", vha->host_no);
61 67
68 if (IS_QLA82XX(vha->hw)) {
69 qla82xx_md_free(vha);
70 qla82xx_md_prep(vha);
71 }
62 ha->fw_dump_reading = 0; 72 ha->fw_dump_reading = 0;
63 ha->fw_dumped = 0; 73 ha->fw_dumped = 0;
64 break; 74 break;
@@ -75,7 +85,26 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
75 qla2x00_alloc_fw_dump(vha); 85 qla2x00_alloc_fw_dump(vha);
76 break; 86 break;
77 case 3: 87 case 3:
78 qla2x00_system_error(vha); 88 if (IS_QLA82XX(ha)) {
89 qla82xx_idc_lock(ha);
90 qla82xx_set_reset_owner(vha);
91 qla82xx_idc_unlock(ha);
92 } else
93 qla2x00_system_error(vha);
94 break;
95 case 4:
96 if (IS_QLA82XX(ha)) {
97 if (ha->md_tmplt_hdr)
98 ql_dbg(ql_dbg_user, vha, 0x705b,
99 "MiniDump supported with this firmware.\n");
100 else
101 ql_dbg(ql_dbg_user, vha, 0x709d,
102 "MiniDump not supported with this firmware.\n");
103 }
104 break;
105 case 5:
106 if (IS_QLA82XX(ha))
107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
79 break; 108 break;
80 } 109 }
81 return (count); 110 return (count);
@@ -546,6 +575,11 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
546 575
547 scsi_block_requests(vha->host); 576 scsi_block_requests(vha->host);
548 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 577 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
578 if (IS_QLA82XX(ha)) {
579 qla82xx_idc_lock(ha);
580 qla82xx_set_reset_owner(vha);
581 qla82xx_idc_unlock(ha);
582 }
549 qla2xxx_wake_dpc(vha); 583 qla2xxx_wake_dpc(vha);
550 qla2x00_wait_for_chip_reset(vha); 584 qla2x00_wait_for_chip_reset(vha);
551 scsi_unblock_requests(vha->host); 585 scsi_unblock_requests(vha->host);
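The qla_attr.c hunks above make the fw_dump sysfs node present the ISP82xx minidump as two back-to-back regions: reads below md_template_size return template header bytes, and reads past it index the dump buffer. The following is a minimal user-space sketch of that offset arithmetic, for illustration only; the struct and field names are stand-ins rather than driver symbols.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct md_regions {
	const uint8_t *tmplt;		/* stands in for ha->md_tmplt_hdr */
	size_t tmplt_size;		/* stands in for ha->md_template_size */
	const uint8_t *dump;		/* stands in for ha->md_dump */
	size_t dump_size;		/* stands in for ha->md_dump_size */
};

/* Mirror of the read split above: a read that starts inside the template
 * returns template bytes only; a read past it indexes the dump buffer. */
size_t md_read(const struct md_regions *md, size_t off, uint8_t *buf, size_t count)
{
	if (off < md->tmplt_size) {
		size_t n = md->tmplt_size - off;

		if (n > count)
			n = count;
		memcpy(buf, md->tmplt + off, n);
		return n;
	}
	off -= md->tmplt_size;
	if (off >= md->dump_size)
		return 0;
	if (count > md->dump_size - off)
		count = md->dump_size - off;
	memcpy(buf, md->dump + off, count);
	return count;
}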
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d79cd8a5f831..238bc9178219 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -403,7 +403,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
403 return ptr + sizeof(struct qla2xxx_mq_chain); 403 return ptr + sizeof(struct qla2xxx_mq_chain);
404} 404}
405 405
406static void 406void
407qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) 407qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
408{ 408{
409 struct qla_hw_data *ha = vha->hw; 409 struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a03eaf40f377..fcf052c50bf5 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2438,7 +2438,8 @@ struct qla_hw_data {
2438 uint32_t quiesce_owner:1; 2438 uint32_t quiesce_owner:1;
2439 uint32_t thermal_supported:1; 2439 uint32_t thermal_supported:1;
2440 uint32_t isp82xx_reset_hdlr_active:1; 2440 uint32_t isp82xx_reset_hdlr_active:1;
2441 /* 26 bits */ 2441 uint32_t isp82xx_reset_owner:1;
2442 /* 28 bits */
2442 } flags; 2443 } flags;
2443 2444
2444 /* This spinlock is used to protect "io transactions", you must 2445 /* This spinlock is used to protect "io transactions", you must
@@ -2822,6 +2823,12 @@ struct qla_hw_data {
2822 2823
2823 uint8_t fw_type; 2824 uint8_t fw_type;
2824 __le32 file_prd_off; /* File firmware product offset */ 2825 __le32 file_prd_off; /* File firmware product offset */
2826
2827 uint32_t md_template_size;
2828 void *md_tmplt_hdr;
2829 dma_addr_t md_tmplt_hdr_dma;
2830 void *md_dump;
2831 uint32_t md_dump_size;
2825}; 2832};
2826 2833
2827/* 2834/*
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 29b1a3e28231..df58e2451781 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -104,6 +104,8 @@ extern int ql2xenablehba_err_chk;
104extern int ql2xtargetreset; 104extern int ql2xtargetreset;
105extern int ql2xdontresethba; 105extern int ql2xdontresethba;
106extern unsigned int ql2xmaxlun; 106extern unsigned int ql2xmaxlun;
107extern int ql2xmdcapmask;
108extern int ql2xmdenable;
107 109
108extern int qla2x00_loop_reset(scsi_qla_host_t *); 110extern int qla2x00_loop_reset(scsi_qla_host_t *);
109extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 111extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -442,6 +444,7 @@ extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
442extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); 444extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
443extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, 445extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
444 uint8_t *, uint32_t); 446 uint8_t *, uint32_t);
447extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
445 448
446/* 449/*
447 * Global Function Prototypes in qla_gs.c source file. 450 * Global Function Prototypes in qla_gs.c source file.
@@ -570,6 +573,7 @@ extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
570extern void qla82xx_start_iocbs(srb_t *); 573extern void qla82xx_start_iocbs(srb_t *);
571extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); 574extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
572extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); 575extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
576extern char *qdev_state(uint32_t);
573 577
574/* BSG related functions */ 578/* BSG related functions */
575extern int qla24xx_bsg_request(struct fc_bsg_job *); 579extern int qla24xx_bsg_request(struct fc_bsg_job *);
@@ -579,4 +583,14 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
579 dma_addr_t, size_t, uint32_t); 583 dma_addr_t, size_t, uint32_t);
580extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, 584extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
581 uint16_t *, uint16_t *); 585 uint16_t *, uint16_t *);
586
587/* Minidump related functions */
588extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
589extern int qla82xx_md_get_template(scsi_qla_host_t *);
590extern int qla82xx_md_alloc(scsi_qla_host_t *);
591extern void qla82xx_md_free(scsi_qla_host_t *);
592extern int qla82xx_md_collect(scsi_qla_host_t *);
593extern void qla82xx_md_prep(scsi_qla_host_t *);
594extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
595
582#endif /* _QLA_GBL_H */ 596#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 37da04d3db26..2375e38964e9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1503,10 +1503,8 @@ enable_82xx_npiv:
1503 &ha->fw_xcb_count, NULL, NULL, 1503 &ha->fw_xcb_count, NULL, NULL,
1504 &ha->max_npiv_vports, NULL); 1504 &ha->max_npiv_vports, NULL);
1505 1505
1506 if (!fw_major_version && ql2xallocfwdump) { 1506 if (!fw_major_version && ql2xallocfwdump)
1507 if (!IS_QLA82XX(ha)) 1507 qla2x00_alloc_fw_dump(vha);
1508 qla2x00_alloc_fw_dump(vha);
1509 }
1510 } 1508 }
1511 } else { 1509 } else {
1512 ql_log(ql_log_fatal, vha, 0x00cd, 1510 ql_log(ql_log_fatal, vha, 0x00cd,
@@ -1924,7 +1922,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1924 rval = qla84xx_init_chip(vha); 1922 rval = qla84xx_init_chip(vha);
1925 if (rval != QLA_SUCCESS) { 1923 if (rval != QLA_SUCCESS) {
1926 ql_log(ql_log_warn, 1924 ql_log(ql_log_warn,
1927 vha, 0x8043, 1925 vha, 0x8026,
1928 "Init chip failed.\n"); 1926 "Init chip failed.\n");
1929 break; 1927 break;
1930 } 1928 }
@@ -1933,7 +1931,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1933 cs84xx_time = jiffies - cs84xx_time; 1931 cs84xx_time = jiffies - cs84xx_time;
1934 wtime += cs84xx_time; 1932 wtime += cs84xx_time;
1935 mtime += cs84xx_time; 1933 mtime += cs84xx_time;
1936 ql_dbg(ql_dbg_taskm, vha, 0x8042, 1934 ql_dbg(ql_dbg_taskm, vha, 0x8025,
1937 "Increasing wait time by %ld. " 1935 "Increasing wait time by %ld. "
1938 "New time %ld.\n", cs84xx_time, 1936 "New time %ld.\n", cs84xx_time,
1939 wtime); 1937 wtime);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index f7604ea1af83..a41137452c41 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4186,3 +4186,92 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4186 4186
4187 return rval; 4187 return rval;
4188} 4188}
4189
4190int
4191qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4192{
4193 struct qla_hw_data *ha = vha->hw;
4194 mbx_cmd_t mc;
4195 mbx_cmd_t *mcp = &mc;
4196 int rval = QLA_FUNCTION_FAILED;
4197
4198 ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
4199
4200 memset(mcp->mb, 0 , sizeof(mcp->mb));
4201 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4202 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4203 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
4204 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
4205
4206 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4207 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
4208 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4209
4210 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4211 mcp->tov = MBX_TOV_SECONDS;
4212 rval = qla2x00_mailbox_command(vha, mcp);
4213
4214 /* Always copy back return mailbox values. */
4215 if (rval != QLA_SUCCESS) {
4216 ql_dbg(ql_dbg_mbx, vha, 0x1120,
4217 "mailbox command FAILED=0x%x, subcode=%x.\n",
4218 (mcp->mb[1] << 16) | mcp->mb[0],
4219 (mcp->mb[3] << 16) | mcp->mb[2]);
4220 } else {
4221 ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
4222 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4223 if (!ha->md_template_size) {
4224 ql_dbg(ql_dbg_mbx, vha, 0x1122,
4225 "Null template size obtained.\n");
4226 rval = QLA_FUNCTION_FAILED;
4227 }
4228 }
4229 return rval;
4230}
4231
4232int
4233qla82xx_md_get_template(scsi_qla_host_t *vha)
4234{
4235 struct qla_hw_data *ha = vha->hw;
4236 mbx_cmd_t mc;
4237 mbx_cmd_t *mcp = &mc;
4238 int rval = QLA_FUNCTION_FAILED;
4239
4240 ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
4241
4242 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4243 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
4244 if (!ha->md_tmplt_hdr) {
4245 ql_log(ql_log_warn, vha, 0x1124,
4246 "Unable to allocate memory for Minidump template.\n");
4247 return rval;
4248 }
4249
4250 memset(mcp->mb, 0 , sizeof(mcp->mb));
4251 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4252 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4253 mcp->mb[2] = LSW(RQST_TMPLT);
4254 mcp->mb[3] = MSW(RQST_TMPLT);
4255 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
4256 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
4257 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
4258 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
4259 mcp->mb[8] = LSW(ha->md_template_size);
4260 mcp->mb[9] = MSW(ha->md_template_size);
4261
4262 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4263 mcp->tov = MBX_TOV_SECONDS;
4264 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
4265 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4266 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4267 rval = qla2x00_mailbox_command(vha, mcp);
4268
4269 if (rval != QLA_SUCCESS) {
4270 ql_dbg(ql_dbg_mbx, vha, 0x1125,
4271 "mailbox command FAILED=0x%x, subcode=%x.\n",
4272 ((mcp->mb[1] << 16) | mcp->mb[0]),
4273 ((mcp->mb[3] << 16) | mcp->mb[2]));
4274 } else
4275 ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
4276 return rval;
4277}
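Both new mailbox routines above split 32-bit command/subcode values and the 64-bit template DMA address across 16-bit mailbox registers with the driver's LSW/MSW/LSD/MSD helpers. The sketch below re-creates that packing in plain C to show which halves land in which registers; the macro definitions are illustrative stand-ins and the DMA address is a made-up value.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's LSW/MSW/LSD/MSD helpers. */
#define LSW(x)	((uint16_t)(x))
#define MSW(x)	((uint16_t)(((uint32_t)(x)) >> 16))
#define LSD(x)	((uint32_t)((uint64_t)(x)))
#define MSD(x)	((uint32_t)(((uint64_t)(x)) >> 32))

int main(void)
{
	uint64_t tmplt_dma = 0x0000000123456780ULL;	/* made-up DMA address */
	uint16_t mb[10] = { 0 };

	/* Same packing as qla82xx_md_get_template() above. */
	mb[0] = LSW(0x129);		/* MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE */
	mb[1] = MSW(0x129);
	mb[2] = LSW(0x1);		/* RQST_TMPLT */
	mb[3] = MSW(0x1);
	mb[4] = LSW(LSD(tmplt_dma));	/* low 16 bits of low dword */
	mb[5] = MSW(LSD(tmplt_dma));	/* high 16 bits of low dword */
	mb[6] = LSW(MSD(tmplt_dma));	/* low 16 bits of high dword */
	mb[7] = MSW(MSD(tmplt_dma));	/* high 16 bits of high dword */

	printf("mb[4..7] = %04x %04x %04x %04x\n", mb[4], mb[5], mb[6], mb[7]);
	return 0;
}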
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 049807cda419..43a0a9a4556f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -7,6 +7,8 @@
7#include "qla_def.h" 7#include "qla_def.h"
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/pci.h> 9#include <linux/pci.h>
10#include <linux/ratelimit.h>
11#include <linux/vmalloc.h>
10#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
11 13
12#define MASK(n) ((1ULL<<(n))-1) 14#define MASK(n) ((1ULL<<(n))-1)
@@ -328,7 +330,7 @@ unsigned qla82xx_crb_hub_agt[64] = {
328}; 330};
329 331
330/* Device states */ 332/* Device states */
331char *qdev_state[] = { 333char *q_dev_state[] = {
332 "Unknown", 334 "Unknown",
333 "Cold", 335 "Cold",
334 "Initializing", 336 "Initializing",
@@ -339,6 +341,11 @@ char *qdev_state[] = {
339 "Quiescent", 341 "Quiescent",
340}; 342};
341 343
344char *qdev_state(uint32_t dev_state)
345{
346 return q_dev_state[dev_state];
347}
348
342/* 349/*
343 * In: 'off' is offset from CRB space in 128M pci map 350 * In: 'off' is offset from CRB space in 128M pci map
344 * Out: 'off' is 2M pci map addr 351 * Out: 'off' is 2M pci map addr
@@ -2355,9 +2362,13 @@ qla82xx_need_reset(struct qla_hw_data *ha)
2355 uint32_t drv_state; 2362 uint32_t drv_state;
2356 int rval; 2363 int rval;
2357 2364
2358 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2365 if (ha->flags.isp82xx_reset_owner)
2359 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2366 return 1;
2360 return rval; 2367 else {
2368 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2369 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2370 return rval;
2371 }
2361} 2372}
2362 2373
2363static inline void 2374static inline void
@@ -2374,8 +2385,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
2374 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2385 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2375 } 2386 }
2376 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2387 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2377 ql_log(ql_log_info, vha, 0x00bb, 2388 ql_dbg(ql_dbg_init, vha, 0x00bb,
2378 "drv_state = 0x%x.\n", drv_state); 2389 "drv_state = 0x%08x.\n", drv_state);
2379 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2390 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2380} 2391}
2381 2392
@@ -3528,7 +3539,7 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3528static void 3539static void
3529qla82xx_need_reset_handler(scsi_qla_host_t *vha) 3540qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3530{ 3541{
3531 uint32_t dev_state, drv_state, drv_active; 3542 uint32_t dev_state, drv_state, drv_active, active_mask = 0;
3532 unsigned long reset_timeout; 3543 unsigned long reset_timeout;
3533 struct qla_hw_data *ha = vha->hw; 3544 struct qla_hw_data *ha = vha->hw;
3534 struct req_que *req = ha->req_q_map[0]; 3545 struct req_que *req = ha->req_q_map[0];
@@ -3541,15 +3552,32 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3541 qla82xx_idc_lock(ha); 3552 qla82xx_idc_lock(ha);
3542 } 3553 }
3543 3554
3544 qla82xx_set_rst_ready(ha); 3555 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3556 if (!ha->flags.isp82xx_reset_owner) {
3557 ql_dbg(ql_dbg_p3p, vha, 0xb028,
3558 "reset_acknowledged by 0x%x\n", ha->portnum);
3559 qla82xx_set_rst_ready(ha);
3560 } else {
3561 active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
3562 drv_active &= active_mask;
3563 ql_dbg(ql_dbg_p3p, vha, 0xb029,
3564 "active_mask: 0x%08x\n", active_mask);
3565 }
3545 3566
3546 /* wait for 10 seconds for reset ack from all functions */ 3567 /* wait for 10 seconds for reset ack from all functions */
3547 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 3568 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3548 3569
3549 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3570 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3550 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3571 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3572 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3551 3573
3552 while (drv_state != drv_active) { 3574 ql_dbg(ql_dbg_p3p, vha, 0xb02a,
3575 "drv_state: 0x%08x, drv_active: 0x%08x, "
3576 "dev_state: 0x%08x, active_mask: 0x%08x\n",
3577 drv_state, drv_active, dev_state, active_mask);
3578
3579 while (drv_state != drv_active &&
3580 dev_state != QLA82XX_DEV_INITIALIZING) {
3553 if (time_after_eq(jiffies, reset_timeout)) { 3581 if (time_after_eq(jiffies, reset_timeout)) {
3554 ql_log(ql_log_warn, vha, 0x00b5, 3582 ql_log(ql_log_warn, vha, 0x00b5,
3555 "Reset timeout.\n"); 3583 "Reset timeout.\n");
@@ -3560,22 +3588,78 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3560 qla82xx_idc_lock(ha); 3588 qla82xx_idc_lock(ha);
3561 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3589 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3562 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3590 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3591 if (ha->flags.isp82xx_reset_owner)
3592 drv_active &= active_mask;
3593 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3563 } 3594 }
3564 3595
3565 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3596 ql_dbg(ql_dbg_p3p, vha, 0xb02b,
3597 "drv_state: 0x%08x, drv_active: 0x%08x, "
3598 "dev_state: 0x%08x, active_mask: 0x%08x\n",
3599 drv_state, drv_active, dev_state, active_mask);
3600
3566 ql_log(ql_log_info, vha, 0x00b6, 3601 ql_log(ql_log_info, vha, 0x00b6,
3567 "Device state is 0x%x = %s.\n", 3602 "Device state is 0x%x = %s.\n",
3568 dev_state, 3603 dev_state,
3569 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3604 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3570 3605
3571 /* Force to DEV_COLD unless someone else is starting a reset */ 3606 /* Force to DEV_COLD unless someone else is starting a reset */
3572 if (dev_state != QLA82XX_DEV_INITIALIZING) { 3607 if (dev_state != QLA82XX_DEV_INITIALIZING &&
3608 dev_state != QLA82XX_DEV_COLD) {
3573 ql_log(ql_log_info, vha, 0x00b7, 3609 ql_log(ql_log_info, vha, 0x00b7,
3574 "HW State: COLD/RE-INIT.\n"); 3610 "HW State: COLD/RE-INIT.\n");
3575 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3611 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3612 if (ql2xmdenable) {
3613 if (qla82xx_md_collect(vha))
3614 ql_log(ql_log_warn, vha, 0xb02c,
3615 "Not able to collect minidump.\n");
3616 } else
3617 ql_log(ql_log_warn, vha, 0xb04f,
3618 "Minidump disabled.\n");
3576 } 3619 }
3577} 3620}
3578 3621
3622static void
3623qla82xx_check_md_needed(scsi_qla_host_t *vha)
3624{
3625 struct qla_hw_data *ha = vha->hw;
3626 uint16_t fw_major_version, fw_minor_version, fw_subminor_version;
3627 uint16_t fw_attributes;
3628 uint32_t fw_memory_size, mpi_capabilities;
3629 uint8_t mpi_version[3], phy_version[3];
3630
3631 if (!ha->fw_dumped) {
3632 qla2x00_get_fw_version(vha,
3633 &fw_major_version,
3634 &fw_minor_version,
3635 &fw_subminor_version,
3636 &fw_attributes, &fw_memory_size,
3637 mpi_version, &mpi_capabilities,
3638 phy_version);
3639
3640 if (fw_major_version != ha->fw_major_version ||
3641 fw_minor_version != ha->fw_minor_version ||
3642 fw_subminor_version != ha->fw_subminor_version) {
3643 ql_log(ql_log_info, vha, 0xb02d,
3644 "Firmware version differs "
3645 "Previous version: %d:%d:%d - "
3646 "New version: %d:%d:%d\n",
3647 ha->fw_major_version,
3648 ha->fw_minor_version, ha->fw_subminor_version,
3649 fw_major_version, fw_minor_version,
3650 fw_subminor_version);
3651 /* Release MiniDump resources */
3652 qla82xx_md_free(vha);
3653 /* Allocate MiniDump resources */
3654 qla82xx_md_prep(vha);
3655 }
3656 } else
3657 ql_log(ql_log_info, vha, 0xb02e,
3658 "Firmware dump available to retrieve\n",
3659 vha->host_no);
3660}
3661
3662
3579int 3663int
3580qla82xx_check_fw_alive(scsi_qla_host_t *vha) 3664qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3581{ 3665{
@@ -3637,7 +3721,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3637 ql_log(ql_log_info, vha, 0x009b, 3721 ql_log(ql_log_info, vha, 0x009b,
3638 "Device state is 0x%x = %s.\n", 3722 "Device state is 0x%x = %s.\n",
3639 dev_state, 3723 dev_state,
3640 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3724 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3641 3725
3642 /* wait for 30 seconds for device to go ready */ 3726 /* wait for 30 seconds for device to go ready */
3643 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3727 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3659,16 +3743,18 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3659 ql_log(ql_log_info, vha, 0x009d, 3743 ql_log(ql_log_info, vha, 0x009d,
3660 "Device state is 0x%x = %s.\n", 3744 "Device state is 0x%x = %s.\n",
3661 dev_state, 3745 dev_state,
3662 dev_state < MAX_STATES ? qdev_state[dev_state] : 3746 dev_state < MAX_STATES ? qdev_state(dev_state) :
3663 "Unknown"); 3747 "Unknown");
3664 } 3748 }
3665 3749
3666 switch (dev_state) { 3750 switch (dev_state) {
3667 case QLA82XX_DEV_READY: 3751 case QLA82XX_DEV_READY:
3752 qla82xx_check_md_needed(vha);
3753 ha->flags.isp82xx_reset_owner = 0;
3668 goto exit; 3754 goto exit;
3669 case QLA82XX_DEV_COLD: 3755 case QLA82XX_DEV_COLD:
3670 rval = qla82xx_device_bootstrap(vha); 3756 rval = qla82xx_device_bootstrap(vha);
3671 goto exit; 3757 break;
3672 case QLA82XX_DEV_INITIALIZING: 3758 case QLA82XX_DEV_INITIALIZING:
3673 qla82xx_idc_unlock(ha); 3759 qla82xx_idc_unlock(ha);
3674 msleep(1000); 3760 msleep(1000);
@@ -3791,6 +3877,28 @@ int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3791 return rval; 3877 return rval;
3792} 3878}
3793 3879
3880void
3881qla82xx_set_reset_owner(scsi_qla_host_t *vha)
3882{
3883 struct qla_hw_data *ha = vha->hw;
3884 uint32_t dev_state;
3885
3886 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3887 if (dev_state == QLA82XX_DEV_READY) {
3888 ql_log(ql_log_info, vha, 0xb02f,
3889 "HW State: NEED RESET\n");
3890 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3891 QLA82XX_DEV_NEED_RESET);
3892 ha->flags.isp82xx_reset_owner = 1;
3893 ql_dbg(ql_dbg_p3p, vha, 0xb030,
3894 "reset_owner is 0x%x\n", ha->portnum);
3895 } else
3896 ql_log(ql_log_info, vha, 0xb031,
3897 "Device state is 0x%x = %s.\n",
3898 dev_state,
3899 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3900}
3901
3794/* 3902/*
3795 * qla82xx_abort_isp 3903 * qla82xx_abort_isp
3796 * Resets ISP and aborts all outstanding commands. 3904 * Resets ISP and aborts all outstanding commands.
@@ -3806,7 +3914,6 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3806{ 3914{
3807 int rval; 3915 int rval;
3808 struct qla_hw_data *ha = vha->hw; 3916 struct qla_hw_data *ha = vha->hw;
3809 uint32_t dev_state;
3810 3917
3811 if (vha->device_flags & DFLG_DEV_FAILED) { 3918 if (vha->device_flags & DFLG_DEV_FAILED) {
3812 ql_log(ql_log_warn, vha, 0x8024, 3919 ql_log(ql_log_warn, vha, 0x8024,
@@ -3816,16 +3923,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3816 ha->flags.isp82xx_reset_hdlr_active = 1; 3923 ha->flags.isp82xx_reset_hdlr_active = 1;
3817 3924
3818 qla82xx_idc_lock(ha); 3925 qla82xx_idc_lock(ha);
3819 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3926 qla82xx_set_reset_owner(vha);
3820 if (dev_state == QLA82XX_DEV_READY) {
3821 ql_log(ql_log_info, vha, 0x8025,
3822 "HW State: NEED RESET.\n");
3823 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3824 QLA82XX_DEV_NEED_RESET);
3825 } else
3826 ql_log(ql_log_info, vha, 0x8026,
3827 "Hw State: %s.\n", dev_state < MAX_STATES ?
3828 qdev_state[dev_state] : "Unknown");
3829 qla82xx_idc_unlock(ha); 3927 qla82xx_idc_unlock(ha);
3830 3928
3831 rval = qla82xx_device_state_handler(vha); 3929 rval = qla82xx_device_state_handler(vha);
@@ -4016,3 +4114,763 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
4016 } 4114 }
4017 } 4115 }
4018} 4116}
4117
4118/* Minidump related functions */
4119int
4120qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
4121{
4122 uint32_t off_value, rval = 0;
4123
4124 WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
4125 (off & 0xFFFF0000));
4126
4127 /* Read back value to make sure write has gone through */
4128 RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
4129 off_value = (off & 0x0000FFFF);
4130
4131 if (flag)
4132 WRT_REG_DWORD((void *)
4133 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
4134 data);
4135 else
4136 rval = RD_REG_DWORD((void *)
4137 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
4138
4139 return rval;
4140}
4141
4142static int
4143qla82xx_minidump_process_control(scsi_qla_host_t *vha,
4144 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4145{
4146 struct qla_hw_data *ha = vha->hw;
4147 struct qla82xx_md_entry_crb *crb_entry;
4148 uint32_t read_value, opcode, poll_time;
4149 uint32_t addr, index, crb_addr;
4150 unsigned long wtime;
4151 struct qla82xx_md_template_hdr *tmplt_hdr;
4152 uint32_t rval = QLA_SUCCESS;
4153 int i;
4154
4155 tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4156 crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr;
4157 crb_addr = crb_entry->addr;
4158
4159 for (i = 0; i < crb_entry->op_count; i++) {
4160 opcode = crb_entry->crb_ctrl.opcode;
4161 if (opcode & QLA82XX_DBG_OPCODE_WR) {
4162 qla82xx_md_rw_32(ha, crb_addr,
4163 crb_entry->value_1, 1);
4164 opcode &= ~QLA82XX_DBG_OPCODE_WR;
4165 }
4166
4167 if (opcode & QLA82XX_DBG_OPCODE_RW) {
4168 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4169 qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4170 opcode &= ~QLA82XX_DBG_OPCODE_RW;
4171 }
4172
4173 if (opcode & QLA82XX_DBG_OPCODE_AND) {
4174 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4175 read_value &= crb_entry->value_2;
4176 opcode &= ~QLA82XX_DBG_OPCODE_AND;
4177 if (opcode & QLA82XX_DBG_OPCODE_OR) {
4178 read_value |= crb_entry->value_3;
4179 opcode &= ~QLA82XX_DBG_OPCODE_OR;
4180 }
4181 qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4182 }
4183
4184 if (opcode & QLA82XX_DBG_OPCODE_OR) {
4185 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4186 read_value |= crb_entry->value_3;
4187 qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4188 opcode &= ~QLA82XX_DBG_OPCODE_OR;
4189 }
4190
4191 if (opcode & QLA82XX_DBG_OPCODE_POLL) {
4192 poll_time = crb_entry->crb_strd.poll_timeout;
4193 wtime = jiffies + poll_time;
4194 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4195
4196 do {
4197 if ((read_value & crb_entry->value_2)
4198 == crb_entry->value_1)
4199 break;
4200 else if (time_after_eq(jiffies, wtime)) {
4201 /* capturing dump failed */
4202 rval = QLA_FUNCTION_FAILED;
4203 break;
4204 } else
4205 read_value = qla82xx_md_rw_32(ha,
4206 crb_addr, 0, 0);
4207 } while (1);
4208 opcode &= ~QLA82XX_DBG_OPCODE_POLL;
4209 }
4210
4211 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
4212 if (crb_entry->crb_strd.state_index_a) {
4213 index = crb_entry->crb_strd.state_index_a;
4214 addr = tmplt_hdr->saved_state_array[index];
4215 } else
4216 addr = crb_addr;
4217
4218 read_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4219 index = crb_entry->crb_ctrl.state_index_v;
4220 tmplt_hdr->saved_state_array[index] = read_value;
4221 opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
4222 }
4223
4224 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
4225 if (crb_entry->crb_strd.state_index_a) {
4226 index = crb_entry->crb_strd.state_index_a;
4227 addr = tmplt_hdr->saved_state_array[index];
4228 } else
4229 addr = crb_addr;
4230
4231 if (crb_entry->crb_ctrl.state_index_v) {
4232 index = crb_entry->crb_ctrl.state_index_v;
4233 read_value =
4234 tmplt_hdr->saved_state_array[index];
4235 } else
4236 read_value = crb_entry->value_1;
4237
4238 qla82xx_md_rw_32(ha, addr, read_value, 1);
4239 opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
4240 }
4241
4242 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
4243 index = crb_entry->crb_ctrl.state_index_v;
4244 read_value = tmplt_hdr->saved_state_array[index];
4245 read_value <<= crb_entry->crb_ctrl.shl;
4246 read_value >>= crb_entry->crb_ctrl.shr;
4247 if (crb_entry->value_2)
4248 read_value &= crb_entry->value_2;
4249 read_value |= crb_entry->value_3;
4250 read_value += crb_entry->value_1;
4251 tmplt_hdr->saved_state_array[index] = read_value;
4252 opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
4253 }
4254 crb_addr += crb_entry->crb_strd.addr_stride;
4255 }
4256 return rval;
4257}
4258
4259static void
4260qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
4261 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4262{
4263 struct qla_hw_data *ha = vha->hw;
4264 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
4265 struct qla82xx_md_entry_rdocm *ocm_hdr;
4266 uint32_t *data_ptr = *d_ptr;
4267
4268 ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
4269 r_addr = ocm_hdr->read_addr;
4270 r_stride = ocm_hdr->read_addr_stride;
4271 loop_cnt = ocm_hdr->op_count;
4272
4273 for (i = 0; i < loop_cnt; i++) {
4274 r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase));
4275 *data_ptr++ = cpu_to_le32(r_value);
4276 r_addr += r_stride;
4277 }
4278 *d_ptr = data_ptr;
4279}
4280
4281static void
4282qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
4283 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4284{
4285 struct qla_hw_data *ha = vha->hw;
4286 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
4287 struct qla82xx_md_entry_mux *mux_hdr;
4288 uint32_t *data_ptr = *d_ptr;
4289
4290 mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
4291 r_addr = mux_hdr->read_addr;
4292 s_addr = mux_hdr->select_addr;
4293 s_stride = mux_hdr->select_value_stride;
4294 s_value = mux_hdr->select_value;
4295 loop_cnt = mux_hdr->op_count;
4296
4297 for (i = 0; i < loop_cnt; i++) {
4298 qla82xx_md_rw_32(ha, s_addr, s_value, 1);
4299 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4300 *data_ptr++ = cpu_to_le32(s_value);
4301 *data_ptr++ = cpu_to_le32(r_value);
4302 s_value += s_stride;
4303 }
4304 *d_ptr = data_ptr;
4305}
4306
4307static void
4308qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
4309 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4310{
4311 struct qla_hw_data *ha = vha->hw;
4312 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
4313 struct qla82xx_md_entry_crb *crb_hdr;
4314 uint32_t *data_ptr = *d_ptr;
4315
4316 crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
4317 r_addr = crb_hdr->addr;
4318 r_stride = crb_hdr->crb_strd.addr_stride;
4319 loop_cnt = crb_hdr->op_count;
4320
4321 for (i = 0; i < loop_cnt; i++) {
4322 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4323 *data_ptr++ = cpu_to_le32(r_addr);
4324 *data_ptr++ = cpu_to_le32(r_value);
4325 r_addr += r_stride;
4326 }
4327 *d_ptr = data_ptr;
4328}
4329
4330static int
4331qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
4332 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4333{
4334 struct qla_hw_data *ha = vha->hw;
4335 uint32_t addr, r_addr, c_addr, t_r_addr;
4336 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
4337 unsigned long p_wait, w_time, p_mask;
4338 uint32_t c_value_w, c_value_r;
4339 struct qla82xx_md_entry_cache *cache_hdr;
4340 int rval = QLA_FUNCTION_FAILED;
4341 uint32_t *data_ptr = *d_ptr;
4342
4343 cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
4344 loop_count = cache_hdr->op_count;
4345 r_addr = cache_hdr->read_addr;
4346 c_addr = cache_hdr->control_addr;
4347 c_value_w = cache_hdr->cache_ctrl.write_value;
4348
4349 t_r_addr = cache_hdr->tag_reg_addr;
4350 t_value = cache_hdr->addr_ctrl.init_tag_value;
4351 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
4352 p_wait = cache_hdr->cache_ctrl.poll_wait;
4353 p_mask = cache_hdr->cache_ctrl.poll_mask;
4354
4355 for (i = 0; i < loop_count; i++) {
4356 qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
4357 if (c_value_w)
4358 qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
4359
4360 if (p_mask) {
4361 w_time = jiffies + p_wait;
4362 do {
4363 c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0);
4364 if ((c_value_r & p_mask) == 0)
4365 break;
4366 else if (time_after_eq(jiffies, w_time)) {
4367 /* capturing dump failed */
4368 ql_dbg(ql_dbg_p3p, vha, 0xb032,
4369 "c_value_r: 0x%x, poll_mask: 0x%lx, "
4370 "w_time: 0x%lx\n",
4371 c_value_r, p_mask, w_time);
4372 return rval;
4373 }
4374 } while (1);
4375 }
4376
4377 addr = r_addr;
4378 for (k = 0; k < r_cnt; k++) {
4379 r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4380 *data_ptr++ = cpu_to_le32(r_value);
4381 addr += cache_hdr->read_ctrl.read_addr_stride;
4382 }
4383 t_value += cache_hdr->addr_ctrl.tag_value_stride;
4384 }
4385 *d_ptr = data_ptr;
4386 return QLA_SUCCESS;
4387}
4388
4389static void
4390qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
4391 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4392{
4393 struct qla_hw_data *ha = vha->hw;
4394 uint32_t addr, r_addr, c_addr, t_r_addr;
4395 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
4396 uint32_t c_value_w;
4397 struct qla82xx_md_entry_cache *cache_hdr;
4398 uint32_t *data_ptr = *d_ptr;
4399
4400 cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
4401 loop_count = cache_hdr->op_count;
4402 r_addr = cache_hdr->read_addr;
4403 c_addr = cache_hdr->control_addr;
4404 c_value_w = cache_hdr->cache_ctrl.write_value;
4405
4406 t_r_addr = cache_hdr->tag_reg_addr;
4407 t_value = cache_hdr->addr_ctrl.init_tag_value;
4408 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
4409
4410 for (i = 0; i < loop_count; i++) {
4411 qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
4412 qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
4413 addr = r_addr;
4414 for (k = 0; k < r_cnt; k++) {
4415 r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4416 *data_ptr++ = cpu_to_le32(r_value);
4417 addr += cache_hdr->read_ctrl.read_addr_stride;
4418 }
4419 t_value += cache_hdr->addr_ctrl.tag_value_stride;
4420 }
4421 *d_ptr = data_ptr;
4422}
4423
4424static void
4425qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
4426 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4427{
4428 struct qla_hw_data *ha = vha->hw;
4429 uint32_t s_addr, r_addr;
4430 uint32_t r_stride, r_value, r_cnt, qid = 0;
4431 uint32_t i, k, loop_cnt;
4432 struct qla82xx_md_entry_queue *q_hdr;
4433 uint32_t *data_ptr = *d_ptr;
4434
4435 q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
4436 s_addr = q_hdr->select_addr;
4437 r_cnt = q_hdr->rd_strd.read_addr_cnt;
4438 r_stride = q_hdr->rd_strd.read_addr_stride;
4439 loop_cnt = q_hdr->op_count;
4440
4441 for (i = 0; i < loop_cnt; i++) {
4442 qla82xx_md_rw_32(ha, s_addr, qid, 1);
4443 r_addr = q_hdr->read_addr;
4444 for (k = 0; k < r_cnt; k++) {
4445 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4446 *data_ptr++ = cpu_to_le32(r_value);
4447 r_addr += r_stride;
4448 }
4449 qid += q_hdr->q_strd.queue_id_stride;
4450 }
4451 *d_ptr = data_ptr;
4452}
4453
4454static void
4455qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
4456 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4457{
4458 struct qla_hw_data *ha = vha->hw;
4459 uint32_t r_addr, r_value;
4460 uint32_t i, loop_cnt;
4461 struct qla82xx_md_entry_rdrom *rom_hdr;
4462 uint32_t *data_ptr = *d_ptr;
4463
4464 rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
4465 r_addr = rom_hdr->read_addr;
4466 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
4467
4468 for (i = 0; i < loop_cnt; i++) {
4469 qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
4470 (r_addr & 0xFFFF0000), 1);
4471 r_value = qla82xx_md_rw_32(ha,
4472 MD_DIRECT_ROM_READ_BASE +
4473 (r_addr & 0x0000FFFF), 0, 0);
4474 *data_ptr++ = cpu_to_le32(r_value);
4475 r_addr += sizeof(uint32_t);
4476 }
4477 *d_ptr = data_ptr;
4478}
4479
4480static int
4481qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
4482 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4483{
4484 struct qla_hw_data *ha = vha->hw;
4485 uint32_t r_addr, r_value, r_data;
4486 uint32_t i, j, loop_cnt;
4487 struct qla82xx_md_entry_rdmem *m_hdr;
4488 unsigned long flags;
4489 int rval = QLA_FUNCTION_FAILED;
4490 uint32_t *data_ptr = *d_ptr;
4491
4492 m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
4493 r_addr = m_hdr->read_addr;
4494 loop_cnt = m_hdr->read_data_size/16;
4495
4496 if (r_addr & 0xf) {
4497 ql_log(ql_log_warn, vha, 0xb033,
4498 "Read addr 0x%x not 16 bytes alligned\n", r_addr);
4499 return rval;
4500 }
4501
4502 if (m_hdr->read_data_size % 16) {
4503 ql_log(ql_log_warn, vha, 0xb034,
4504 "Read data[0x%x] not multiple of 16 bytes\n",
4505 m_hdr->read_data_size);
4506 return rval;
4507 }
4508
4509 ql_dbg(ql_dbg_p3p, vha, 0xb035,
4510 "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
4511 __func__, r_addr, m_hdr->read_data_size, loop_cnt);
4512
4513 write_lock_irqsave(&ha->hw_lock, flags);
4514 for (i = 0; i < loop_cnt; i++) {
4515 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
4516 r_value = 0;
4517 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
4518 r_value = MIU_TA_CTL_ENABLE;
4519 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4520 r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
4521 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4522
4523 for (j = 0; j < MAX_CTL_CHECK; j++) {
4524 r_value = qla82xx_md_rw_32(ha,
4525 MD_MIU_TEST_AGT_CTRL, 0, 0);
4526 if ((r_value & MIU_TA_CTL_BUSY) == 0)
4527 break;
4528 }
4529
4530 if (j >= MAX_CTL_CHECK) {
4531 printk_ratelimited(KERN_ERR
4532 "failed to read through agent\n");
4533 write_unlock_irqrestore(&ha->hw_lock, flags);
4534 return rval;
4535 }
4536
4537 for (j = 0; j < 4; j++) {
4538 r_data = qla82xx_md_rw_32(ha,
4539 MD_MIU_TEST_AGT_RDDATA[j], 0, 0);
4540 *data_ptr++ = cpu_to_le32(r_data);
4541 }
4542 r_addr += 16;
4543 }
4544 write_unlock_irqrestore(&ha->hw_lock, flags);
4545 *d_ptr = data_ptr;
4546 return QLA_SUCCESS;
4547}
4548
4549static int
4550qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
4551{
4552 struct qla_hw_data *ha = vha->hw;
4553 uint64_t chksum = 0;
4554 uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr;
4555 int count = ha->md_template_size/sizeof(uint32_t);
4556
4557 while (count-- > 0)
4558 chksum += *d_ptr++;
4559 while (chksum >> 32)
4560 chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
4561 return ~chksum;
4562}
4563
4564static void
4565qla82xx_mark_entry_skipped(scsi_qla_host_t *vha,
4566 qla82xx_md_entry_hdr_t *entry_hdr, int index)
4567{
4568 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
4569 ql_dbg(ql_dbg_p3p, vha, 0xb036,
4570 "Skipping entry[%d]: "
4571 "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4572 index, entry_hdr->entry_type,
4573 entry_hdr->d_ctrl.entry_capture_mask);
4574}
4575
4576int
4577qla82xx_md_collect(scsi_qla_host_t *vha)
4578{
4579 struct qla_hw_data *ha = vha->hw;
4580 int no_entry_hdr = 0;
4581 qla82xx_md_entry_hdr_t *entry_hdr;
4582 struct qla82xx_md_template_hdr *tmplt_hdr;
4583 uint32_t *data_ptr;
4584 uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
4585 int i = 0, rval = QLA_FUNCTION_FAILED;
4586
4587 tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4588 data_ptr = (uint32_t *)ha->md_dump;
4589
4590 if (ha->fw_dumped) {
4591 ql_log(ql_log_info, vha, 0xb037,
4592 "Firmware dump available to retrive\n");
4593 goto md_failed;
4594 }
4595
4596 ha->fw_dumped = 0;
4597
4598 if (!ha->md_tmplt_hdr || !ha->md_dump) {
4599 ql_log(ql_log_warn, vha, 0xb038,
4600 "Memory not allocated for minidump capture\n");
4601 goto md_failed;
4602 }
4603
4604 if (qla82xx_validate_template_chksum(vha)) {
4605 ql_log(ql_log_info, vha, 0xb039,
4606 "Template checksum validation error\n");
4607 goto md_failed;
4608 }
4609
4610 no_entry_hdr = tmplt_hdr->num_of_entries;
4611 ql_dbg(ql_dbg_p3p, vha, 0xb03a,
4612 "No of entry headers in Template: 0x%x\n", no_entry_hdr);
4613
4614 ql_dbg(ql_dbg_p3p, vha, 0xb03b,
4615 "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
4616
4617 f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
4618
4619 /* Validate whether required debug level is set */
4620 if ((f_capture_mask & 0x3) != 0x3) {
4621 ql_log(ql_log_warn, vha, 0xb03c,
4622 "Minimum required capture mask[0x%x] level not set\n",
4623 f_capture_mask);
4624 goto md_failed;
4625 }
4626 tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
4627
4628 tmplt_hdr->driver_info[0] = vha->host_no;
4629 tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) |
4630 (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) |
4631 QLA_DRIVER_BETA_VER;
4632
4633 total_data_size = ha->md_dump_size;
4634
4635 ql_log(ql_log_info, vha, 0xb03d,
4636 "Total minidump data_size 0x%x to be captured\n", total_data_size);
4637
4638 /* Check whether template obtained is valid */
4639 if (tmplt_hdr->entry_type != QLA82XX_TLHDR) {
4640 ql_log(ql_log_warn, vha, 0xb04e,
4641 "Bad template header entry type: 0x%x obtained\n",
4642 tmplt_hdr->entry_type);
4643 goto md_failed;
4644 }
4645
4646 entry_hdr = (qla82xx_md_entry_hdr_t *) \
4647 (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
4648
4649 /* Walk through the entry headers */
4650 for (i = 0; i < no_entry_hdr; i++) {
4651
4652 if (data_collected > total_data_size) {
4653 ql_log(ql_log_warn, vha, 0xb03e,
4654 "More MiniDump data collected: [0x%x]\n",
4655 data_collected);
4656 goto md_failed;
4657 }
4658
4659 if (!(entry_hdr->d_ctrl.entry_capture_mask &
4660 ql2xmdcapmask)) {
4661 entry_hdr->d_ctrl.driver_flags |=
4662 QLA82XX_DBG_SKIPPED_FLAG;
4663 ql_dbg(ql_dbg_p3p, vha, 0xb03f,
4664 "Skipping entry[%d]: "
4665 "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4666 i, entry_hdr->entry_type,
4667 entry_hdr->d_ctrl.entry_capture_mask);
4668 goto skip_nxt_entry;
4669 }
4670
4671 ql_dbg(ql_dbg_p3p, vha, 0xb040,
4672 "[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
4673 "entry_type: 0x%x, captrue_mask: 0x%x\n",
4674 __func__, i, data_ptr, entry_hdr,
4675 entry_hdr->entry_type,
4676 entry_hdr->d_ctrl.entry_capture_mask);
4677
4678 ql_dbg(ql_dbg_p3p, vha, 0xb041,
4679 "Data collected: [0x%x], Dump size left:[0x%x]\n",
4680 data_collected, (ha->md_dump_size - data_collected));
4681
4682 /* Decode the entry type and take
4683 * required action to capture debug data */
4684 switch (entry_hdr->entry_type) {
4685 case QLA82XX_RDEND:
4686 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4687 break;
4688 case QLA82XX_CNTRL:
4689 rval = qla82xx_minidump_process_control(vha,
4690 entry_hdr, &data_ptr);
4691 if (rval != QLA_SUCCESS) {
4692 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4693 goto md_failed;
4694 }
4695 break;
4696 case QLA82XX_RDCRB:
4697 qla82xx_minidump_process_rdcrb(vha,
4698 entry_hdr, &data_ptr);
4699 break;
4700 case QLA82XX_RDMEM:
4701 rval = qla82xx_minidump_process_rdmem(vha,
4702 entry_hdr, &data_ptr);
4703 if (rval != QLA_SUCCESS) {
4704 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4705 goto md_failed;
4706 }
4707 break;
4708 case QLA82XX_BOARD:
4709 case QLA82XX_RDROM:
4710 qla82xx_minidump_process_rdrom(vha,
4711 entry_hdr, &data_ptr);
4712 break;
4713 case QLA82XX_L2DTG:
4714 case QLA82XX_L2ITG:
4715 case QLA82XX_L2DAT:
4716 case QLA82XX_L2INS:
4717 rval = qla82xx_minidump_process_l2tag(vha,
4718 entry_hdr, &data_ptr);
4719 if (rval != QLA_SUCCESS) {
4720 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4721 goto md_failed;
4722 }
4723 break;
4724 case QLA82XX_L1DAT:
4725 case QLA82XX_L1INS:
4726 qla82xx_minidump_process_l1cache(vha,
4727 entry_hdr, &data_ptr);
4728 break;
4729 case QLA82XX_RDOCM:
4730 qla82xx_minidump_process_rdocm(vha,
4731 entry_hdr, &data_ptr);
4732 break;
4733 case QLA82XX_RDMUX:
4734 qla82xx_minidump_process_rdmux(vha,
4735 entry_hdr, &data_ptr);
4736 break;
4737 case QLA82XX_QUEUE:
4738 qla82xx_minidump_process_queue(vha,
4739 entry_hdr, &data_ptr);
4740 break;
4741 case QLA82XX_RDNOP:
4742 default:
4743 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4744 break;
4745 }
4746
4747 ql_dbg(ql_dbg_p3p, vha, 0xb042,
4748 "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr);
4749
4750 data_collected = (uint8_t *)data_ptr -
4751 (uint8_t *)ha->md_dump;
4752skip_nxt_entry:
4753 entry_hdr = (qla82xx_md_entry_hdr_t *) \
4754 (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
4755 }
4756
4757 if (data_collected != total_data_size) {
4758 ql_log(ql_log_warn, vha, 0xb043,
4759 "MiniDump data mismatch: Data collected: [0x%x], "
4760 "total_data_size:[0x%x]\n",
4761 data_collected, total_data_size);
4762 goto md_failed;
4763 }
4764
4765 ql_log(ql_log_info, vha, 0xb044,
4766 "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
4767 vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
4768 ha->fw_dumped = 1;
4769 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
4770
4771md_failed:
4772 return rval;
4773}
4774
4775int
4776qla82xx_md_alloc(scsi_qla_host_t *vha)
4777{
4778 struct qla_hw_data *ha = vha->hw;
4779 int i, k;
4780 struct qla82xx_md_template_hdr *tmplt_hdr;
4781
4782 tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4783
4784 if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) {
4785 ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF;
4786 ql_log(ql_log_info, vha, 0xb045,
4787 "Forcing driver capture mask to firmware default capture mask: 0x%x.\n",
4788 ql2xmdcapmask);
4789 }
4790
4791 for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) {
4792 if (i & ql2xmdcapmask)
4793 ha->md_dump_size += tmplt_hdr->capture_size_array[k];
4794 }
4795
4796 if (ha->md_dump) {
4797 ql_log(ql_log_warn, vha, 0xb046,
4798 "Firmware dump previously allocated.\n");
4799 return 1;
4800 }
4801
4802 ha->md_dump = vmalloc(ha->md_dump_size);
4803 if (ha->md_dump == NULL) {
4804 ql_log(ql_log_warn, vha, 0xb047,
4805 "Unable to allocate memory for Minidump size "
4806 "(0x%x).\n", ha->md_dump_size);
4807 return 1;
4808 }
4809 return 0;
4810}
4811
4812void
4813qla82xx_md_free(scsi_qla_host_t *vha)
4814{
4815 struct qla_hw_data *ha = vha->hw;
4816
4817 /* Release the template header allocated */
4818 if (ha->md_tmplt_hdr) {
4819 ql_log(ql_log_info, vha, 0xb048,
4820 "Free MiniDump template: %p, size (%d KB)\n",
4821 ha->md_tmplt_hdr, ha->md_template_size / 1024);
4822 dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
4823 ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4824 ha->md_tmplt_hdr = 0;
4825 }
4826
4827 /* Release the template data buffer allocated */
4828 if (ha->md_dump) {
4829 ql_log(ql_log_info, vha, 0xb049,
4830 "Free MiniDump memory: %p, size (%d KB)\n",
4831 ha->md_dump, ha->md_dump_size / 1024);
4832 vfree(ha->md_dump);
4833 ha->md_dump_size = 0;
4834 ha->md_dump = 0;
4835 }
4836}
4837
4838void
4839qla82xx_md_prep(scsi_qla_host_t *vha)
4840{
4841 struct qla_hw_data *ha = vha->hw;
4842 int rval;
4843
4844 /* Get Minidump template size */
4845 rval = qla82xx_md_get_template_size(vha);
4846 if (rval == QLA_SUCCESS) {
4847 ql_log(ql_log_info, vha, 0xb04a,
4848 "MiniDump Template size obtained (%d KB)\n",
4849 ha->md_template_size / 1024);
4850
4851 /* Get Minidump template */
4852 rval = qla82xx_md_get_template(vha);
4853 if (rval == QLA_SUCCESS) {
4854 ql_dbg(ql_dbg_p3p, vha, 0xb04b,
4855 "MiniDump Template obtained\n");
4856
4857 /* Allocate memory for minidump */
4858 rval = qla82xx_md_alloc(vha);
4859 if (rval == QLA_SUCCESS)
4860 ql_log(ql_log_info, vha, 0xb04c,
4861 "MiniDump memory allocated (%d KB)\n",
4862 ha->md_dump_size / 1024);
4863 else {
4864 ql_log(ql_log_info, vha, 0xb04d,
4865 "Free MiniDump template: %p, size: (%d KB)\n",
4866 ha->md_tmplt_hdr,
4867 ha->md_template_size / 1024);
4868 dma_free_coherent(&ha->pdev->dev,
4869 ha->md_template_size,
4870 ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4871 ha->md_tmplt_hdr = 0;
4872 }
4873
4874 }
4875 }
4876}
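qla82xx_md_collect() above refuses to run unless qla82xx_validate_template_chksum() returns zero: the end-around-carry sum of every dword in the template, complemented, must come out to zero. A standalone sketch of that rule, assuming nothing beyond the arithmetic in the function itself:

#include <stddef.h>
#include <stdint.h>

/* Returns 0 when the template is intact, matching the driver's
 * "if (qla82xx_validate_template_chksum(vha)) -> error" usage. */
uint32_t md_template_chksum(const uint32_t *tmplt, size_t dwords)
{
	uint64_t sum = 0;

	while (dwords--)
		sum += *tmplt++;
	/* Fold the carries back in until the sum fits in 32 bits. */
	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
	return ~(uint32_t)sum;
}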
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 8a21832c6693..97ee250b63bb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -484,8 +484,6 @@
484#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) 484#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
485#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) 485#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
486#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) 486#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
487
488#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
489#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) 487#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
490 488
491#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 489#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
@@ -922,4 +920,256 @@ struct ct6_dsd {
922#define M25P_INSTR_DP 0xb9 920#define M25P_INSTR_DP 0xb9
923#define M25P_INSTR_RES 0xab 921#define M25P_INSTR_RES 0xab
924 922
923/* Minidump related */
924
925/*
926 * Version of the template
927 * 4 Bytes
928 * X.Major.Minor.RELEASE
929 */
930#define QLA82XX_MINIDUMP_VERSION 0x10101
931
932/*
933 * Entry Type Defines
934 */
935#define QLA82XX_RDNOP 0
936#define QLA82XX_RDCRB 1
937#define QLA82XX_RDMUX 2
938#define QLA82XX_QUEUE 3
939#define QLA82XX_BOARD 4
940#define QLA82XX_RDSRE 5
941#define QLA82XX_RDOCM 6
942#define QLA82XX_CACHE 10
943#define QLA82XX_L1DAT 11
944#define QLA82XX_L1INS 12
945#define QLA82XX_L2DTG 21
946#define QLA82XX_L2ITG 22
947#define QLA82XX_L2DAT 23
948#define QLA82XX_L2INS 24
949#define QLA82XX_RDROM 71
950#define QLA82XX_RDMEM 72
951#define QLA82XX_CNTRL 98
952#define QLA82XX_TLHDR 99
953#define QLA82XX_RDEND 255
954
955/*
956 * Opcodes for Control Entries.
957 * These Flags are bit fields.
958 */
959#define QLA82XX_DBG_OPCODE_WR 0x01
960#define QLA82XX_DBG_OPCODE_RW 0x02
961#define QLA82XX_DBG_OPCODE_AND 0x04
962#define QLA82XX_DBG_OPCODE_OR 0x08
963#define QLA82XX_DBG_OPCODE_POLL 0x10
964#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
965#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
966#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
967
968/*
969 * Template Header and Entry Header definitions start here.
970 */
971
972/*
973 * Template Header
974 * Parts of the template header can be modified by the driver.
975 * These include the saved_state_array, capture_debug_level, driver_timestamp
976 */
977
978#define QLA82XX_DBG_STATE_ARRAY_LEN 16
979#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
980#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
981
982/*
983 * Driver Flags
984 */
985#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
986#define QLA82XX_DEFAULT_CAP_MASK 0xFF /* default capture mask */
987
988struct qla82xx_md_template_hdr {
989 uint32_t entry_type;
990 uint32_t first_entry_offset;
991 uint32_t size_of_template;
992 uint32_t capture_debug_level;
993
994 uint32_t num_of_entries;
995 uint32_t version;
996 uint32_t driver_timestamp;
997 uint32_t template_checksum;
998
999 uint32_t driver_capture_mask;
1000 uint32_t driver_info[3];
1001
1002 uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
1003 uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
1004
1005 /* markers_array used to capture some special locations on board */
1006 uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN];
1007 uint32_t num_of_free_entries; /* For internal use */
1008 uint32_t free_entry_offset; /* For internal use */
1009 uint32_t total_table_size; /* For internal use */
1010 uint32_t bkup_table_offset; /* For internal use */
1011} __packed;
1012
1013/*
1014 * Entry Header: Common to All Entry Types
1015 */
1016
1017/*
1018 * Driver Code is for driver to write some info about the entry.
1019 * Currently not used.
1020 */
1021typedef struct qla82xx_md_entry_hdr {
1022 uint32_t entry_type;
1023 uint32_t entry_size;
1024 uint32_t entry_capture_size;
1025 struct {
1026 uint8_t entry_capture_mask;
1027 uint8_t entry_code;
1028 uint8_t driver_code;
1029 uint8_t driver_flags;
1030 } d_ctrl;
1031} __packed qla82xx_md_entry_hdr_t;
1032
1033/*
1034 * Read CRB entry header
1035 */
1036struct qla82xx_md_entry_crb {
1037 qla82xx_md_entry_hdr_t h;
1038 uint32_t addr;
1039 struct {
1040 uint8_t addr_stride;
1041 uint8_t state_index_a;
1042 uint16_t poll_timeout;
1043 } crb_strd;
1044
1045 uint32_t data_size;
1046 uint32_t op_count;
1047
1048 struct {
1049 uint8_t opcode;
1050 uint8_t state_index_v;
1051 uint8_t shl;
1052 uint8_t shr;
1053 } crb_ctrl;
1054
1055 uint32_t value_1;
1056 uint32_t value_2;
1057 uint32_t value_3;
1058} __packed;
1059
1060/*
1061 * Cache entry header
1062 */
1063struct qla82xx_md_entry_cache {
1064 qla82xx_md_entry_hdr_t h;
1065
1066 uint32_t tag_reg_addr;
1067 struct {
1068 uint16_t tag_value_stride;
1069 uint16_t init_tag_value;
1070 } addr_ctrl;
1071
1072 uint32_t data_size;
1073 uint32_t op_count;
1074
1075 uint32_t control_addr;
1076 struct {
1077 uint16_t write_value;
1078 uint8_t poll_mask;
1079 uint8_t poll_wait;
1080 } cache_ctrl;
1081
1082 uint32_t read_addr;
1083 struct {
1084 uint8_t read_addr_stride;
1085 uint8_t read_addr_cnt;
1086 uint16_t rsvd_1;
1087 } read_ctrl;
1088} __packed;
1089
1090/*
1091 * Read OCM
1092 */
1093struct qla82xx_md_entry_rdocm {
1094 qla82xx_md_entry_hdr_t h;
1095
1096 uint32_t rsvd_0;
1097 uint32_t rsvd_1;
1098 uint32_t data_size;
1099 uint32_t op_count;
1100
1101 uint32_t rsvd_2;
1102 uint32_t rsvd_3;
1103 uint32_t read_addr;
1104 uint32_t read_addr_stride;
1105 uint32_t read_addr_cntrl;
1106} __packed;
1107
1108/*
1109 * Read Memory
1110 */
1111struct qla82xx_md_entry_rdmem {
1112 qla82xx_md_entry_hdr_t h;
1113 uint32_t rsvd[6];
1114 uint32_t read_addr;
1115 uint32_t read_data_size;
1116} __packed;
1117
1118/*
1119 * Read ROM
1120 */
1121struct qla82xx_md_entry_rdrom {
1122 qla82xx_md_entry_hdr_t h;
1123 uint32_t rsvd[6];
1124 uint32_t read_addr;
1125 uint32_t read_data_size;
1126} __packed;
1127
1128struct qla82xx_md_entry_mux {
1129 qla82xx_md_entry_hdr_t h;
1130
1131 uint32_t select_addr;
1132 uint32_t rsvd_0;
1133 uint32_t data_size;
1134 uint32_t op_count;
1135
1136 uint32_t select_value;
1137 uint32_t select_value_stride;
1138 uint32_t read_addr;
1139 uint32_t rsvd_1;
1140} __packed;
1141
1142struct qla82xx_md_entry_queue {
1143 qla82xx_md_entry_hdr_t h;
1144
1145 uint32_t select_addr;
1146 struct {
1147 uint16_t queue_id_stride;
1148 uint16_t rsvd_0;
1149 } q_strd;
1150
1151 uint32_t data_size;
1152 uint32_t op_count;
1153 uint32_t rsvd_1;
1154 uint32_t rsvd_2;
1155
1156 uint32_t read_addr;
1157 struct {
1158 uint8_t read_addr_stride;
1159 uint8_t read_addr_cnt;
1160 uint16_t rsvd_3;
1161 } rd_strd;
1162} __packed;
1163
1164#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
1165#define RQST_TMPLT_SIZE 0x0
1166#define RQST_TMPLT 0x1
1167#define MD_DIRECT_ROM_WINDOW 0x42110030
1168#define MD_DIRECT_ROM_READ_BASE 0x42150000
1169#define MD_MIU_TEST_AGT_CTRL 0x41000090
1170#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
1171#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1172
1173static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
1174 0x410000B8, 0x410000BC };
925#endif 1175#endif
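The header above describes the firmware-provided template layout: a qla82xx_md_template_hdr at offset zero, with the entry list starting at first_entry_offset and each entry advanced by its own entry_size, which is exactly how qla82xx_md_collect() iterates. The sketch below walks such a template; the trimmed struct definitions are illustrative copies, not replacements for the full qla_nx.h structures.

#include <stdint.h>
#include <stdio.h>

struct md_template_hdr {	/* first fields of qla82xx_md_template_hdr */
	uint32_t entry_type;	/* QLA82XX_TLHDR (99) for a valid template */
	uint32_t first_entry_offset;
	uint32_t size_of_template;
	uint32_t capture_debug_level;
	uint32_t num_of_entries;
	/* remaining fields omitted for the sketch */
};

struct md_entry_hdr {		/* mirrors qla82xx_md_entry_hdr_t */
	uint32_t entry_type;
	uint32_t entry_size;
	uint32_t entry_capture_size;
	uint32_t d_ctrl;
};

void walk_entries(const uint8_t *tmplt)
{
	const struct md_template_hdr *th = (const void *)tmplt;
	const uint8_t *p = tmplt + th->first_entry_offset;
	uint32_t i;

	for (i = 0; i < th->num_of_entries; i++) {
		const struct md_entry_hdr *eh = (const void *)p;

		printf("entry %u: type %u, size %u bytes\n",
		    i, eh->entry_type, eh->entry_size);
		p += eh->entry_size;	/* same stride qla82xx_md_collect() uses */
	}
}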
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 4cace3f20c04..e37556ce211f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -160,9 +160,9 @@ MODULE_PARM_DESC(ql2xetsenable,
160int ql2xdbwr = 1; 160int ql2xdbwr = 1;
161module_param(ql2xdbwr, int, S_IRUGO); 161module_param(ql2xdbwr, int, S_IRUGO);
162MODULE_PARM_DESC(ql2xdbwr, 162MODULE_PARM_DESC(ql2xdbwr,
163 "Option to specify scheme for request queue posting.\n" 163 "Option to specify scheme for request queue posting.\n"
164 " 0 -- Regular doorbell.\n" 164 " 0 -- Regular doorbell.\n"
165 " 1 -- CAMRAM doorbell (faster).\n"); 165 " 1 -- CAMRAM doorbell (faster).\n");
166 166
167int ql2xtargetreset = 1; 167int ql2xtargetreset = 1;
168module_param(ql2xtargetreset, int, S_IRUGO); 168module_param(ql2xtargetreset, int, S_IRUGO);
@@ -185,9 +185,9 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
185int ql2xdontresethba; 185int ql2xdontresethba;
186module_param(ql2xdontresethba, int, S_IRUGO); 186module_param(ql2xdontresethba, int, S_IRUGO);
187MODULE_PARM_DESC(ql2xdontresethba, 187MODULE_PARM_DESC(ql2xdontresethba,
188 "Option to specify reset behaviour.\n" 188 "Option to specify reset behaviour.\n"
189 " 0 (Default) -- Reset on failure.\n" 189 " 0 (Default) -- Reset on failure.\n"
190 " 1 -- Do not reset on failure.\n"); 190 " 1 -- Do not reset on failure.\n");
191 191
192uint ql2xmaxlun = MAX_LUNS; 192uint ql2xmaxlun = MAX_LUNS;
193module_param(ql2xmaxlun, uint, S_IRUGO); 193module_param(ql2xmaxlun, uint, S_IRUGO);
@@ -195,6 +195,19 @@ MODULE_PARM_DESC(ql2xmaxlun,
195 "Defines the maximum LU number to register with the SCSI " 195 "Defines the maximum LU number to register with the SCSI "
196 "midlayer. Default is 65535."); 196 "midlayer. Default is 65535.");
197 197
198int ql2xmdcapmask = 0x1F;
199module_param(ql2xmdcapmask, int, S_IRUGO);
200MODULE_PARM_DESC(ql2xmdcapmask,
201 "Set the Minidump driver capture mask level. "
202 "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
203
204int ql2xmdenable;
205module_param(ql2xmdenable, int, S_IRUGO);
206MODULE_PARM_DESC(ql2xmdenable,
207 "Enable/disable MiniDump. "
208 "0 (Default) - MiniDump disabled. "
209 "1 - MiniDump enabled.");
210
198/* 211/*
199 * SCSI host template entry points 212 * SCSI host template entry points
200 */ 213 */
@@ -2669,6 +2682,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2669 2682
2670 qla2x00_mem_free(ha); 2683 qla2x00_mem_free(ha);
2671 2684
2685 qla82xx_md_free(vha);
2686
2672 qla2x00_free_queues(ha); 2687 qla2x00_free_queues(ha);
2673} 2688}
2674 2689
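The new ql2xmdcapmask parameter above feeds straight into qla82xx_md_alloc(), which sums capture_size_array[k] for every capture-level bit set above bit 0 in order to size the dump buffer. A small sketch of that mapping, using made-up per-level sizes purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_CAP_MASK 0xFF	/* stand-in for QLA82XX_DEFAULT_CAP_MASK */

uint32_t md_dump_size_for_mask(uint32_t capmask, const uint32_t *capture_size_array)
{
	uint32_t size = 0;
	uint32_t i, k;

	/* Level bit 0 is always implied; bits 1..7 add optional regions,
	 * exactly like the sizing loop in qla82xx_md_alloc(). */
	for (i = 0x2, k = 1; i & DEFAULT_CAP_MASK; i <<= 1, k++)
		if (i & capmask)
			size += capture_size_array[k];
	return size;
}

int main(void)
{
	/* Made-up per-level sizes; indexes 1..7 are read by the loop. */
	const uint32_t sizes[8] = { 0, 0x1000, 0x2000, 0x4000,
				    0x8000, 0x10000, 0x20000, 0x40000 };

	printf("mask 0x1F -> 0x%x bytes\n", md_dump_size_for_mask(0x1F, sizes));
	printf("mask 0x7F -> 0x%x bytes\n", md_dump_size_for_mask(0x7F, sizes));
	return 0;
}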