path: root/drivers/scsi/qla2xxx
author     Linus Torvalds <torvalds@linux-foundation.org>   2011-10-28 19:44:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-10-28 19:44:18 -0400
commit     ec7ae517537ae5c7b0b2cd7f562dfa3e7a05b954 (patch)
tree       e6b0c64a51a7c0aa0efd09d4f7a80872e3b1657a /drivers/scsi/qla2xxx
parent     97d2eb13a019ec09cc1a7ea2d3705c0b117b3c0d (diff)
parent     590134fa78fbdbe5fea78c7ae0b2c3364bc9572f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (204 commits)
  [SCSI] qla4xxx: export address/port of connection (fix udev disk names)
  [SCSI] ipr: Fix BUG on adapter dump timeout
  [SCSI] megaraid_sas: Fix instance access in megasas_reset_timer
  [SCSI] hpsa: change confusing message to be more clear
  [SCSI] iscsi class: fix vlan configuration
  [SCSI] qla4xxx: fix data alignment and use nl helpers
  [SCSI] iscsi class: fix link local mispelling
  [SCSI] iscsi class: Replace iscsi_get_next_target_id with IDA
  [SCSI] aacraid: use lower snprintf() limit
  [SCSI] lpfc 8.3.27: Change driver version to 8.3.27
  [SCSI] lpfc 8.3.27: T10 additions for SLI4
  [SCSI] lpfc 8.3.27: Fix queue allocation failure recovery
  [SCSI] lpfc 8.3.27: Change algorithm for getting physical port name
  [SCSI] lpfc 8.3.27: Changed worst case mailbox timeout
  [SCSI] lpfc 8.3.27: Miscellanous logic and interface fixes
  [SCSI] megaraid_sas: Changelog and version update
  [SCSI] megaraid_sas: Add driver workaround for PERC5/1068 kdump kernel panic
  [SCSI] megaraid_sas: Add multiple MSI-X vector/multiple reply queue support
  [SCSI] megaraid_sas: Add support for MegaRAID 9360/9380 12GB/s controllers
  [SCSI] megaraid_sas: Clear FUSION_IN_RESET before enabling interrupts
  ...
Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  111
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c   152
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h    42
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c     6
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h     9
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h    18
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c   36
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c     5
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c   127
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c    970
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h    255
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c     46
12 files changed, 1671 insertions, 106 deletions
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index a31e05f3bfd4..ac326c41e931 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -23,11 +23,23 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
23 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 23 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
24 struct device, kobj))); 24 struct device, kobj)));
25 struct qla_hw_data *ha = vha->hw; 25 struct qla_hw_data *ha = vha->hw;
26 int rval = 0;
26 27
27 if (ha->fw_dump_reading == 0) 28 if (ha->fw_dump_reading == 0)
28 return 0; 29 return 0;
29 30
30 return memory_read_from_buffer(buf, count, &off, ha->fw_dump, 31 if (IS_QLA82XX(ha)) {
32 if (off < ha->md_template_size) {
33 rval = memory_read_from_buffer(buf, count,
34 &off, ha->md_tmplt_hdr, ha->md_template_size);
35 return rval;
36 }
37 off -= ha->md_template_size;
38 rval = memory_read_from_buffer(buf, count,
39 &off, ha->md_dump, ha->md_dump_size);
40 return rval;
41 } else
42 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
31 ha->fw_dump_len); 43 ha->fw_dump_len);
32} 44}
33 45
@@ -41,12 +53,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
41 struct qla_hw_data *ha = vha->hw; 53 struct qla_hw_data *ha = vha->hw;
42 int reading; 54 int reading;
43 55
44 if (IS_QLA82XX(ha)) {
45 ql_dbg(ql_dbg_user, vha, 0x705b,
46 "Firmware dump not supported for ISP82xx\n");
47 return count;
48 }
49
50 if (off != 0) 56 if (off != 0)
51 return (0); 57 return (0);
52 58
@@ -59,6 +65,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
59 ql_log(ql_log_info, vha, 0x705d, 65 ql_log(ql_log_info, vha, 0x705d,
60 "Firmware dump cleared on (%ld).\n", vha->host_no); 66 "Firmware dump cleared on (%ld).\n", vha->host_no);
61 67
68 if (IS_QLA82XX(vha->hw)) {
69 qla82xx_md_free(vha);
70 qla82xx_md_prep(vha);
71 }
62 ha->fw_dump_reading = 0; 72 ha->fw_dump_reading = 0;
63 ha->fw_dumped = 0; 73 ha->fw_dumped = 0;
64 break; 74 break;
@@ -75,10 +85,29 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
75 qla2x00_alloc_fw_dump(vha); 85 qla2x00_alloc_fw_dump(vha);
76 break; 86 break;
77 case 3: 87 case 3:
78 qla2x00_system_error(vha); 88 if (IS_QLA82XX(ha)) {
89 qla82xx_idc_lock(ha);
90 qla82xx_set_reset_owner(vha);
91 qla82xx_idc_unlock(ha);
92 } else
93 qla2x00_system_error(vha);
94 break;
95 case 4:
96 if (IS_QLA82XX(ha)) {
97 if (ha->md_tmplt_hdr)
98 ql_dbg(ql_dbg_user, vha, 0x705b,
99 "MiniDump supported with this firmware.\n");
100 else
101 ql_dbg(ql_dbg_user, vha, 0x709d,
102 "MiniDump not supported with this firmware.\n");
103 }
104 break;
105 case 5:
106 if (IS_QLA82XX(ha))
107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
79 break; 108 break;
80 } 109 }
81 return (count); 110 return -EINVAL;
82} 111}
83 112
84static struct bin_attribute sysfs_fw_dump_attr = { 113static struct bin_attribute sysfs_fw_dump_attr = {
@@ -122,7 +151,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
122 151
123 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || 152 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
124 !ha->isp_ops->write_nvram) 153 !ha->isp_ops->write_nvram)
125 return 0; 154 return -EINVAL;
126 155
127 /* Checksum NVRAM. */ 156 /* Checksum NVRAM. */
128 if (IS_FWI2_CAPABLE(ha)) { 157 if (IS_FWI2_CAPABLE(ha)) {
@@ -165,7 +194,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
165 qla2xxx_wake_dpc(vha); 194 qla2xxx_wake_dpc(vha);
166 qla2x00_wait_for_chip_reset(vha); 195 qla2x00_wait_for_chip_reset(vha);
167 196
168 return (count); 197 return count;
169} 198}
170 199
171static struct bin_attribute sysfs_nvram_attr = { 200static struct bin_attribute sysfs_nvram_attr = {
@@ -239,10 +268,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
239 int val, valid; 268 int val, valid;
240 269
241 if (off) 270 if (off)
242 return 0; 271 return -EINVAL;
243 272
244 if (unlikely(pci_channel_offline(ha->pdev))) 273 if (unlikely(pci_channel_offline(ha->pdev)))
245 return 0; 274 return -EAGAIN;
246 275
247 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) 276 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
248 return -EINVAL; 277 return -EINVAL;
@@ -253,7 +282,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
253 case 0: 282 case 0:
254 if (ha->optrom_state != QLA_SREADING && 283 if (ha->optrom_state != QLA_SREADING &&
255 ha->optrom_state != QLA_SWRITING) 284 ha->optrom_state != QLA_SWRITING)
256 break; 285 return -EINVAL;
257 286
258 ha->optrom_state = QLA_SWAITING; 287 ha->optrom_state = QLA_SWAITING;
259 288
@@ -266,7 +295,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
266 break; 295 break;
267 case 1: 296 case 1:
268 if (ha->optrom_state != QLA_SWAITING) 297 if (ha->optrom_state != QLA_SWAITING)
269 break; 298 return -EINVAL;
270 299
271 ha->optrom_region_start = start; 300 ha->optrom_region_start = start;
272 ha->optrom_region_size = start + size > ha->optrom_size ? 301 ha->optrom_region_size = start + size > ha->optrom_size ?
@@ -280,7 +309,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
280 "(%x).\n", ha->optrom_region_size); 309 "(%x).\n", ha->optrom_region_size);
281 310
282 ha->optrom_state = QLA_SWAITING; 311 ha->optrom_state = QLA_SWAITING;
283 return count; 312 return -ENOMEM;
284 } 313 }
285 314
286 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 315 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
@@ -299,7 +328,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
299 break; 328 break;
300 case 2: 329 case 2:
301 if (ha->optrom_state != QLA_SWAITING) 330 if (ha->optrom_state != QLA_SWAITING)
302 break; 331 return -EINVAL;
303 332
304 /* 333 /*
305 * We need to be more restrictive on which FLASH regions are 334 * We need to be more restrictive on which FLASH regions are
@@ -347,7 +376,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
347 "(%x)\n", ha->optrom_region_size); 376 "(%x)\n", ha->optrom_region_size);
348 377
349 ha->optrom_state = QLA_SWAITING; 378 ha->optrom_state = QLA_SWAITING;
350 return count; 379 return -ENOMEM;
351 } 380 }
352 381
353 ql_dbg(ql_dbg_user, vha, 0x7067, 382 ql_dbg(ql_dbg_user, vha, 0x7067,
@@ -358,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
358 break; 387 break;
359 case 3: 388 case 3:
360 if (ha->optrom_state != QLA_SWRITING) 389 if (ha->optrom_state != QLA_SWRITING)
361 break; 390 return -ENOMEM;
362 391
363 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 392 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
364 ql_log(ql_log_warn, vha, 0x7068, 393 ql_log(ql_log_warn, vha, 0x7068,
@@ -374,7 +403,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
374 ha->optrom_region_start, ha->optrom_region_size); 403 ha->optrom_region_start, ha->optrom_region_size);
375 break; 404 break;
376 default: 405 default:
377 count = -EINVAL; 406 return -EINVAL;
378 } 407 }
379 return count; 408 return count;
380} 409}
@@ -398,10 +427,10 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
398 struct qla_hw_data *ha = vha->hw; 427 struct qla_hw_data *ha = vha->hw;
399 428
400 if (unlikely(pci_channel_offline(ha->pdev))) 429 if (unlikely(pci_channel_offline(ha->pdev)))
401 return 0; 430 return -EAGAIN;
402 431
403 if (!capable(CAP_SYS_ADMIN)) 432 if (!capable(CAP_SYS_ADMIN))
404 return 0; 433 return -EINVAL;
405 434
406 if (IS_NOCACHE_VPD_TYPE(ha)) 435 if (IS_NOCACHE_VPD_TYPE(ha))
407 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, 436 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
@@ -438,17 +467,17 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
438 467
439 /* Update flash version information for 4Gb & above. */ 468 /* Update flash version information for 4Gb & above. */
440 if (!IS_FWI2_CAPABLE(ha)) 469 if (!IS_FWI2_CAPABLE(ha))
441 goto done; 470 return -EINVAL;
442 471
443 tmp_data = vmalloc(256); 472 tmp_data = vmalloc(256);
444 if (!tmp_data) { 473 if (!tmp_data) {
445 ql_log(ql_log_warn, vha, 0x706b, 474 ql_log(ql_log_warn, vha, 0x706b,
446 "Unable to allocate memory for VPD information update.\n"); 475 "Unable to allocate memory for VPD information update.\n");
447 goto done; 476 return -ENOMEM;
448 } 477 }
449 ha->isp_ops->get_flash_version(vha, tmp_data); 478 ha->isp_ops->get_flash_version(vha, tmp_data);
450 vfree(tmp_data); 479 vfree(tmp_data);
451done: 480
452 return count; 481 return count;
453} 482}
454 483
@@ -505,8 +534,7 @@ do_read:
505 "Unable to read SFP data (%x/%x/%x).\n", rval, 534 "Unable to read SFP data (%x/%x/%x).\n", rval,
506 addr, offset); 535 addr, offset);
507 536
508 count = 0; 537 return -EIO;
509 break;
510 } 538 }
511 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE); 539 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
512 buf += SFP_BLOCK_SIZE; 540 buf += SFP_BLOCK_SIZE;
@@ -536,7 +564,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
536 int type; 564 int type;
537 565
538 if (off != 0) 566 if (off != 0)
539 return 0; 567 return -EINVAL;
540 568
541 type = simple_strtol(buf, NULL, 10); 569 type = simple_strtol(buf, NULL, 10);
542 switch (type) { 570 switch (type) {
@@ -546,13 +574,18 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
546 574
547 scsi_block_requests(vha->host); 575 scsi_block_requests(vha->host);
548 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 576 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
577 if (IS_QLA82XX(ha)) {
578 qla82xx_idc_lock(ha);
579 qla82xx_set_reset_owner(vha);
580 qla82xx_idc_unlock(ha);
581 }
549 qla2xxx_wake_dpc(vha); 582 qla2xxx_wake_dpc(vha);
550 qla2x00_wait_for_chip_reset(vha); 583 qla2x00_wait_for_chip_reset(vha);
551 scsi_unblock_requests(vha->host); 584 scsi_unblock_requests(vha->host);
552 break; 585 break;
553 case 0x2025d: 586 case 0x2025d:
554 if (!IS_QLA81XX(ha)) 587 if (!IS_QLA81XX(ha))
555 break; 588 return -EPERM;
556 589
557 ql_log(ql_log_info, vha, 0x706f, 590 ql_log(ql_log_info, vha, 0x706f,
558 "Issuing MPI reset.\n"); 591 "Issuing MPI reset.\n");
@@ -571,7 +604,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
571 if (!IS_QLA82XX(ha) || vha != base_vha) { 604 if (!IS_QLA82XX(ha) || vha != base_vha) {
572 ql_log(ql_log_info, vha, 0x7071, 605 ql_log(ql_log_info, vha, 0x7071,
573 "FCoE ctx reset no supported.\n"); 606 "FCoE ctx reset no supported.\n");
574 return count; 607 return -EPERM;
575 } 608 }
576 609
577 ql_log(ql_log_info, vha, 0x7072, 610 ql_log(ql_log_info, vha, 0x7072,
@@ -607,7 +640,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
607 ha->edc_data_len = 0; 640 ha->edc_data_len = 0;
608 641
609 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) 642 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
610 return 0; 643 return -EINVAL;
611 644
612 if (!ha->edc_data) { 645 if (!ha->edc_data) {
613 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 646 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -615,7 +648,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
615 if (!ha->edc_data) { 648 if (!ha->edc_data) {
616 ql_log(ql_log_warn, vha, 0x7073, 649 ql_log(ql_log_warn, vha, 0x7073,
617 "Unable to allocate memory for EDC write.\n"); 650 "Unable to allocate memory for EDC write.\n");
618 return 0; 651 return -ENOMEM;
619 } 652 }
620 } 653 }
621 654
@@ -634,9 +667,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
634 dev, adr, len, opt); 667 dev, adr, len, opt);
635 if (rval != QLA_SUCCESS) { 668 if (rval != QLA_SUCCESS) {
636 ql_log(ql_log_warn, vha, 0x7074, 669 ql_log(ql_log_warn, vha, 0x7074,
637 "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n", 670 "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n",
638 rval, dev, adr, opt, len, buf[8]); 671 rval, dev, adr, opt, len, buf[8]);
639 return 0; 672 return -EIO;
640 } 673 }
641 674
642 return count; 675 return count;
@@ -665,7 +698,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
665 ha->edc_data_len = 0; 698 ha->edc_data_len = 0;
666 699
667 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) 700 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
668 return 0; 701 return -EINVAL;
669 702
670 if (!ha->edc_data) { 703 if (!ha->edc_data) {
671 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 704 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -673,7 +706,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
673 if (!ha->edc_data) { 706 if (!ha->edc_data) {
674 ql_log(ql_log_warn, vha, 0x708c, 707 ql_log(ql_log_warn, vha, 0x708c,
675 "Unable to allocate memory for EDC status.\n"); 708 "Unable to allocate memory for EDC status.\n");
676 return 0; 709 return -ENOMEM;
677 } 710 }
678 } 711 }
679 712
@@ -693,7 +726,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
693 ql_log(ql_log_info, vha, 0x7075, 726 ql_log(ql_log_info, vha, 0x7075,
694 "Unable to write EDC status (%x) %02x:%04x:%02x.\n", 727 "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
695 rval, dev, adr, opt, len); 728 rval, dev, adr, opt, len);
696 return 0; 729 return -EIO;
697 } 730 }
698 731
699 ha->edc_data_len = len; 732 ha->edc_data_len = len;
@@ -805,7 +838,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
805 if (!ha->dcbx_tlv) { 838 if (!ha->dcbx_tlv) {
806 ql_log(ql_log_warn, vha, 0x7078, 839 ql_log(ql_log_warn, vha, 0x7078,
807 "Unable to allocate memory for DCBX TLV read-data.\n"); 840 "Unable to allocate memory for DCBX TLV read-data.\n");
808 return 0; 841 return -ENOMEM;
809 } 842 }
810 843
811do_read: 844do_read:
@@ -817,7 +850,7 @@ do_read:
817 if (rval != QLA_SUCCESS) { 850 if (rval != QLA_SUCCESS) {
818 ql_log(ql_log_warn, vha, 0x7079, 851 ql_log(ql_log_warn, vha, 0x7079,
819 "Unable to read DCBX TLV (%x).\n", rval); 852 "Unable to read DCBX TLV (%x).\n", rval);
820 count = 0; 853 return -EIO;
821 } 854 }
822 855
823 memcpy(buf, ha->dcbx_tlv, count); 856 memcpy(buf, ha->dcbx_tlv, count);
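
Note on the qla_attr.c hunks above: the fw_dump binary attribute now doubles as the ISP82xx MiniDump interface. Reads return the MiniDump template first and the dump body after it, while the new control values 3, 4 and 5 respectively take reset ownership, probe for template support, and schedule an ISP abort. The fragment below is a minimal userspace sketch of that flow; the sysfs path and the use of control value "1" (which arms fw_dump_reading in code outside this excerpt) are assumptions for illustration only.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed location of the "fw_dump" bin attribute; adjust hostN. */
	const char *path = "/sys/class/scsi_host/host0/device/fw_dump";
	char buf[4096];
	ssize_t n;
	int fd;

	/*
	 * Arm dump reading.  "1" is the pre-existing control value that sets
	 * fw_dump_reading (handled in code outside this hunk); the new "4"
	 * only logs whether the ISP82xx firmware carries a MiniDump template.
	 */
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open fw_dump for write");
		return 1;
	}
	if (write(fd, "1", 1) < 0)
		perror("arm fw_dump");	/* non-fatal for this sketch */
	close(fd);

	/*
	 * For IS_QLA82XX adapters the read handler above returns the MiniDump
	 * template (md_tmplt_hdr) first, then the dump body (md_dump), using
	 * the file offset to switch between the two regions.
	 */
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open fw_dump for read");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}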
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 07d1767cd26b..8b641a8a0c74 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -704,6 +704,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
704 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 704 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
705 705
706 if ((ha->current_topology == ISP_CFG_F || 706 if ((ha->current_topology == ISP_CFG_F ||
707 (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
707 (IS_QLA81XX(ha) && 708 (IS_QLA81XX(ha) &&
708 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 709 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
709 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 710 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
@@ -1447,6 +1448,148 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1447} 1448}
1448 1449
1449static int 1450static int
1451qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1452{
1453 struct Scsi_Host *host = bsg_job->shost;
1454 scsi_qla_host_t *vha = shost_priv(host);
1455 struct qla_hw_data *ha = vha->hw;
1456 int rval = 0;
1457 uint8_t bsg[DMA_POOL_SIZE];
1458 struct qla_image_version_list *list = (void *)bsg;
1459 struct qla_image_version *image;
1460 uint32_t count;
1461 dma_addr_t sfp_dma;
1462 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1463 if (!sfp) {
1464 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1465 EXT_STATUS_NO_MEMORY;
1466 goto done;
1467 }
1468
1469 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1470 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1471
1472 image = list->version;
1473 count = list->count;
1474 while (count--) {
1475 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1476 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1477 image->field_address.device, image->field_address.offset,
1478 sizeof(image->field_info), image->field_address.option);
1479 if (rval) {
1480 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1481 EXT_STATUS_MAILBOX;
1482 goto dealloc;
1483 }
1484 image++;
1485 }
1486
1487 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1488
1489dealloc:
1490 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1491
1492done:
1493 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1494 bsg_job->reply->result = DID_OK << 16;
1495 bsg_job->job_done(bsg_job);
1496
1497 return 0;
1498}
1499
1500static int
1501qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1502{
1503 struct Scsi_Host *host = bsg_job->shost;
1504 scsi_qla_host_t *vha = shost_priv(host);
1505 struct qla_hw_data *ha = vha->hw;
1506 int rval = 0;
1507 uint8_t bsg[DMA_POOL_SIZE];
1508 struct qla_status_reg *sr = (void *)bsg;
1509 dma_addr_t sfp_dma;
1510 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1511 if (!sfp) {
1512 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1513 EXT_STATUS_NO_MEMORY;
1514 goto done;
1515 }
1516
1517 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1518 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1519
1520 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1521 sr->field_address.device, sr->field_address.offset,
1522 sizeof(sr->status_reg), sr->field_address.option);
1523 sr->status_reg = *sfp;
1524
1525 if (rval) {
1526 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1527 EXT_STATUS_MAILBOX;
1528 goto dealloc;
1529 }
1530
1531 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1532 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1533
1534 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1535
1536dealloc:
1537 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1538
1539done:
1540 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1541 bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1542 bsg_job->reply->result = DID_OK << 16;
1543 bsg_job->job_done(bsg_job);
1544
1545 return 0;
1546}
1547
1548static int
1549qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1550{
1551 struct Scsi_Host *host = bsg_job->shost;
1552 scsi_qla_host_t *vha = shost_priv(host);
1553 struct qla_hw_data *ha = vha->hw;
1554 int rval = 0;
1555 uint8_t bsg[DMA_POOL_SIZE];
1556 struct qla_status_reg *sr = (void *)bsg;
1557 dma_addr_t sfp_dma;
1558 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1559 if (!sfp) {
1560 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1561 EXT_STATUS_NO_MEMORY;
1562 goto done;
1563 }
1564
1565 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1566 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1567
1568 *sfp = sr->status_reg;
1569 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1570 sr->field_address.device, sr->field_address.offset,
1571 sizeof(sr->status_reg), sr->field_address.option);
1572
1573 if (rval) {
1574 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1575 EXT_STATUS_MAILBOX;
1576 goto dealloc;
1577 }
1578
1579 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1580
1581dealloc:
1582 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1583
1584done:
1585 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1586 bsg_job->reply->result = DID_OK << 16;
1587 bsg_job->job_done(bsg_job);
1588
1589 return 0;
1590}
1591
1592static int
1450qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) 1593qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1451{ 1594{
1452 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { 1595 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1474,6 +1617,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1474 case QL_VND_UPDATE_FLASH: 1617 case QL_VND_UPDATE_FLASH:
1475 return qla2x00_update_optrom(bsg_job); 1618 return qla2x00_update_optrom(bsg_job);
1476 1619
1620 case QL_VND_SET_FRU_VERSION:
1621 return qla2x00_update_fru_versions(bsg_job);
1622
1623 case QL_VND_READ_FRU_STATUS:
1624 return qla2x00_read_fru_status(bsg_job);
1625
1626 case QL_VND_WRITE_FRU_STATUS:
1627 return qla2x00_write_fru_status(bsg_job);
1628
1477 default: 1629 default:
1478 bsg_job->reply->result = (DID_ERROR << 16); 1630 bsg_job->reply->result = (DID_ERROR << 16);
1479 bsg_job->job_done(bsg_job); 1631 bsg_job->job_done(bsg_job);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 0f0f54e35f06..70caa63a8930 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -16,6 +16,16 @@
16#define QL_VND_FCP_PRIO_CFG_CMD 0x06 16#define QL_VND_FCP_PRIO_CFG_CMD 0x06
17#define QL_VND_READ_FLASH 0x07 17#define QL_VND_READ_FLASH 0x07
18#define QL_VND_UPDATE_FLASH 0x08 18#define QL_VND_UPDATE_FLASH 0x08
19#define QL_VND_SET_FRU_VERSION 0x0B
20#define QL_VND_READ_FRU_STATUS 0x0C
21#define QL_VND_WRITE_FRU_STATUS 0x0D
22
23/* BSG Vendor specific subcode returns */
24#define EXT_STATUS_OK 0
25#define EXT_STATUS_ERR 1
26#define EXT_STATUS_INVALID_PARAM 6
27#define EXT_STATUS_MAILBOX 11
28#define EXT_STATUS_NO_MEMORY 17
19 29
20/* BSG definations for interpreting CommandSent field */ 30/* BSG definations for interpreting CommandSent field */
21#define INT_DEF_LB_LOOPBACK_CMD 0 31#define INT_DEF_LB_LOOPBACK_CMD 0
@@ -141,4 +151,36 @@ struct qla_port_param {
141 uint16_t mode; 151 uint16_t mode;
142 uint16_t speed; 152 uint16_t speed;
143} __attribute__ ((packed)); 153} __attribute__ ((packed));
154
155
156/* FRU VPD */
157
158#define MAX_FRU_SIZE 36
159
160struct qla_field_address {
161 uint16_t offset;
162 uint16_t device;
163 uint16_t option;
164} __packed;
165
166struct qla_field_info {
167 uint8_t version[MAX_FRU_SIZE];
168} __packed;
169
170struct qla_image_version {
171 struct qla_field_address field_address;
172 struct qla_field_info field_info;
173} __packed;
174
175struct qla_image_version_list {
176 uint32_t count;
177 struct qla_image_version version[0];
178} __packed;
179
180struct qla_status_reg {
181 struct qla_field_address field_address;
182 uint8_t status_reg;
183 uint8_t reserved[7];
184} __packed;
185
144#endif 186#endif
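
The three new vendor subcodes (QL_VND_SET_FRU_VERSION, QL_VND_READ_FRU_STATUS, QL_VND_WRITE_FRU_STATUS) consume the FRU VPD structures declared above. As a rough illustration, the hedged userspace sketch below assembles a QL_VND_SET_FRU_VERSION request payload; the struct definitions mirror the header, but the device/offset/version values and the FC BSG delivery step are assumptions, not part of the patch. Note that qla2x00_update_fru_versions() copies at most sizeof(bsg) == DMA_POOL_SIZE bytes out of the request scatterlist, so the whole list must fit in that buffer.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAX_FRU_SIZE 36

struct qla_field_address {
	uint16_t offset;
	uint16_t device;
	uint16_t option;
} __attribute__((packed));

struct qla_field_info {
	uint8_t version[MAX_FRU_SIZE];
} __attribute__((packed));

struct qla_image_version {
	struct qla_field_address field_address;
	struct qla_field_info field_info;
} __attribute__((packed));

struct qla_image_version_list {
	uint32_t count;
	struct qla_image_version version[];
} __attribute__((packed));

/*
 * Build a one-entry list; the driver walks 'count' entries and writes each
 * field_info block through qla2x00_write_sfp() at the given field_address.
 * The address and version strings here are placeholders.
 */
static struct qla_image_version_list *build_fru_list(size_t *len)
{
	struct qla_image_version_list *list;

	*len = sizeof(*list) + sizeof(struct qla_image_version);
	list = calloc(1, *len);
	if (!list)
		return NULL;

	list->count = 1;
	list->version[0].field_address.device = 0xA0;	/* placeholder */
	list->version[0].field_address.offset = 0x00;	/* placeholder */
	list->version[0].field_address.option = 0;
	memcpy(list->version[0].field_info.version, "v1.0", sizeof("v1.0"));
	return list;
}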
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d79cd8a5f831..9df4787715c0 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,7 +12,7 @@
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0116 | | 14 * | Module Init and Probe | 0x0116 | |
15 * | Mailbox commands | 0x1126 | | 15 * | Mailbox commands | 0x1129 | |
16 * | Device Discovery | 0x2083 | | 16 * | Device Discovery | 0x2083 | |
17 * | Queue Command and IO tracing | 0x302e | 0x3008 | 17 * | Queue Command and IO tracing | 0x302e | 0x3008 |
18 * | DPC Thread | 0x401c | | 18 * | DPC Thread | 0x401c | |
@@ -22,7 +22,7 @@
22 * | Task Management | 0x8041 | | 22 * | Task Management | 0x8041 | |
23 * | AER/EEH | 0x900f | | 23 * | AER/EEH | 0x900f | |
24 * | Virtual Port | 0xa007 | | 24 * | Virtual Port | 0xa007 | |
25 * | ISP82XX Specific | 0xb04f | | 25 * | ISP82XX Specific | 0xb051 | |
26 * | MultiQ | 0xc00b | | 26 * | MultiQ | 0xc00b | |
27 * | Misc | 0xd00b | | 27 * | Misc | 0xd00b | |
28 * ---------------------------------------------------------------------- 28 * ----------------------------------------------------------------------
@@ -403,7 +403,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
403 return ptr + sizeof(struct qla2xxx_mq_chain); 403 return ptr + sizeof(struct qla2xxx_mq_chain);
404} 404}
405 405
406static void 406void
407qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) 407qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
408{ 408{
409 struct qla_hw_data *ha = vha->hw; 409 struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a03eaf40f377..fcf052c50bf5 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2438,7 +2438,8 @@ struct qla_hw_data {
2438 uint32_t quiesce_owner:1; 2438 uint32_t quiesce_owner:1;
2439 uint32_t thermal_supported:1; 2439 uint32_t thermal_supported:1;
2440 uint32_t isp82xx_reset_hdlr_active:1; 2440 uint32_t isp82xx_reset_hdlr_active:1;
2441 /* 26 bits */ 2441 uint32_t isp82xx_reset_owner:1;
2442 /* 28 bits */
2442 } flags; 2443 } flags;
2443 2444
2444 /* This spinlock is used to protect "io transactions", you must 2445 /* This spinlock is used to protect "io transactions", you must
@@ -2822,6 +2823,12 @@ struct qla_hw_data {
2822 2823
2823 uint8_t fw_type; 2824 uint8_t fw_type;
2824 __le32 file_prd_off; /* File firmware product offset */ 2825 __le32 file_prd_off; /* File firmware product offset */
2826
2827 uint32_t md_template_size;
2828 void *md_tmplt_hdr;
2829 dma_addr_t md_tmplt_hdr_dma;
2830 void *md_dump;
2831 uint32_t md_dump_size;
2825}; 2832};
2826 2833
2827/* 2834/*
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 29b1a3e28231..ce32d8135c9e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -104,6 +104,8 @@ extern int ql2xenablehba_err_chk;
104extern int ql2xtargetreset; 104extern int ql2xtargetreset;
105extern int ql2xdontresethba; 105extern int ql2xdontresethba;
106extern unsigned int ql2xmaxlun; 106extern unsigned int ql2xmaxlun;
107extern int ql2xmdcapmask;
108extern int ql2xmdenable;
107 109
108extern int qla2x00_loop_reset(scsi_qla_host_t *); 110extern int qla2x00_loop_reset(scsi_qla_host_t *);
109extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 111extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -407,6 +409,8 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *);
407extern int qla24xx_beacon_on(struct scsi_qla_host *); 409extern int qla24xx_beacon_on(struct scsi_qla_host *);
408extern int qla24xx_beacon_off(struct scsi_qla_host *); 410extern int qla24xx_beacon_off(struct scsi_qla_host *);
409extern void qla24xx_beacon_blink(struct scsi_qla_host *); 411extern void qla24xx_beacon_blink(struct scsi_qla_host *);
412extern int qla82xx_beacon_on(struct scsi_qla_host *);
413extern int qla82xx_beacon_off(struct scsi_qla_host *);
410 414
411extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, 415extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
412 uint32_t, uint32_t); 416 uint32_t, uint32_t);
@@ -442,6 +446,7 @@ extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
442extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); 446extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
443extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, 447extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
444 uint8_t *, uint32_t); 448 uint8_t *, uint32_t);
449extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
445 450
446/* 451/*
447 * Global Function Prototypes in qla_gs.c source file. 452 * Global Function Prototypes in qla_gs.c source file.
@@ -569,7 +574,10 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
569extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); 574extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
570extern void qla82xx_start_iocbs(srb_t *); 575extern void qla82xx_start_iocbs(srb_t *);
571extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); 576extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
577extern int qla82xx_check_md_needed(scsi_qla_host_t *);
572extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); 578extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
579extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
580extern char *qdev_state(uint32_t);
573 581
574/* BSG related functions */ 582/* BSG related functions */
575extern int qla24xx_bsg_request(struct fc_bsg_job *); 583extern int qla24xx_bsg_request(struct fc_bsg_job *);
@@ -579,4 +587,14 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
579 dma_addr_t, size_t, uint32_t); 587 dma_addr_t, size_t, uint32_t);
580extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, 588extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
581 uint16_t *, uint16_t *); 589 uint16_t *, uint16_t *);
590
591/* Minidump related functions */
592extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
593extern int qla82xx_md_get_template(scsi_qla_host_t *);
594extern int qla82xx_md_alloc(scsi_qla_host_t *);
595extern void qla82xx_md_free(scsi_qla_host_t *);
596extern int qla82xx_md_collect(scsi_qla_host_t *);
597extern void qla82xx_md_prep(scsi_qla_host_t *);
598extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
599
582#endif /* _QLA_GBL_H */ 600#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 37da04d3db26..f03e915f1877 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1480,13 +1480,19 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1480 if (rval == QLA_SUCCESS) { 1480 if (rval == QLA_SUCCESS) {
1481enable_82xx_npiv: 1481enable_82xx_npiv:
1482 fw_major_version = ha->fw_major_version; 1482 fw_major_version = ha->fw_major_version;
1483 rval = qla2x00_get_fw_version(vha, 1483 if (IS_QLA82XX(ha))
1484 &ha->fw_major_version, 1484 qla82xx_check_md_needed(vha);
1485 &ha->fw_minor_version, 1485 else {
1486 &ha->fw_subminor_version, 1486 rval = qla2x00_get_fw_version(vha,
1487 &ha->fw_attributes, &ha->fw_memory_size, 1487 &ha->fw_major_version,
1488 ha->mpi_version, &ha->mpi_capabilities, 1488 &ha->fw_minor_version,
1489 ha->phy_version); 1489 &ha->fw_subminor_version,
1490 &ha->fw_attributes,
1491 &ha->fw_memory_size,
1492 ha->mpi_version,
1493 &ha->mpi_capabilities,
1494 ha->phy_version);
1495 }
1490 if (rval != QLA_SUCCESS) 1496 if (rval != QLA_SUCCESS)
1491 goto failed; 1497 goto failed;
1492 ha->flags.npiv_supported = 0; 1498 ha->flags.npiv_supported = 0;
@@ -1503,10 +1509,8 @@ enable_82xx_npiv:
1503 &ha->fw_xcb_count, NULL, NULL, 1509 &ha->fw_xcb_count, NULL, NULL,
1504 &ha->max_npiv_vports, NULL); 1510 &ha->max_npiv_vports, NULL);
1505 1511
1506 if (!fw_major_version && ql2xallocfwdump) { 1512 if (!fw_major_version && ql2xallocfwdump)
1507 if (!IS_QLA82XX(ha)) 1513 qla2x00_alloc_fw_dump(vha);
1508 qla2x00_alloc_fw_dump(vha);
1509 }
1510 } 1514 }
1511 } else { 1515 } else {
1512 ql_log(ql_log_fatal, vha, 0x00cd, 1516 ql_log(ql_log_fatal, vha, 0x00cd,
@@ -1924,7 +1928,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1924 rval = qla84xx_init_chip(vha); 1928 rval = qla84xx_init_chip(vha);
1925 if (rval != QLA_SUCCESS) { 1929 if (rval != QLA_SUCCESS) {
1926 ql_log(ql_log_warn, 1930 ql_log(ql_log_warn,
1927 vha, 0x8043, 1931 vha, 0x8026,
1928 "Init chip failed.\n"); 1932 "Init chip failed.\n");
1929 break; 1933 break;
1930 } 1934 }
@@ -1933,7 +1937,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1933 cs84xx_time = jiffies - cs84xx_time; 1937 cs84xx_time = jiffies - cs84xx_time;
1934 wtime += cs84xx_time; 1938 wtime += cs84xx_time;
1935 mtime += cs84xx_time; 1939 mtime += cs84xx_time;
1936 ql_dbg(ql_dbg_taskm, vha, 0x8042, 1940 ql_dbg(ql_dbg_taskm, vha, 0x8025,
1937 "Increasing wait time by %ld. " 1941 "Increasing wait time by %ld. "
1938 "New time %ld.\n", cs84xx_time, 1942 "New time %ld.\n", cs84xx_time,
1939 wtime); 1943 wtime);
@@ -5443,11 +5447,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5443 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 5447 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5444 5448
5445 /* Update the firmware version */ 5449 /* Update the firmware version */
5446 qla2x00_get_fw_version(vha, &ha->fw_major_version, 5450 status = qla82xx_check_md_needed(vha);
5447 &ha->fw_minor_version, &ha->fw_subminor_version,
5448 &ha->fw_attributes, &ha->fw_memory_size,
5449 ha->mpi_version, &ha->mpi_capabilities,
5450 ha->phy_version);
5451 5451
5452 if (ha->fce) { 5452 if (ha->fce) {
5453 ha->flags.fce_enabled = 1; 5453 ha->flags.fce_enabled = 1;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 8a7591f035e6..3474e86e98ab 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2060,6 +2060,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2060 case ELS_IOCB_TYPE: 2060 case ELS_IOCB_TYPE:
2061 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2061 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2062 break; 2062 break;
2063 case MARKER_TYPE:
2064 /* Do nothing in this case, this check is to prevent it
2065 * from falling into default case
2066 */
2067 break;
2063 default: 2068 default:
2064 /* Type Not Supported. */ 2069 /* Type Not Supported. */
2065 ql_dbg(ql_dbg_async, vha, 0x5042, 2070 ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index f7604ea1af83..3b3cec9f6ac2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4186,3 +4186,130 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4186 4186
4187 return rval; 4187 return rval;
4188} 4188}
4189
4190int
4191qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4192{
4193 struct qla_hw_data *ha = vha->hw;
4194 mbx_cmd_t mc;
4195 mbx_cmd_t *mcp = &mc;
4196 int rval = QLA_FUNCTION_FAILED;
4197
4198 ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
4199
4200 memset(mcp->mb, 0 , sizeof(mcp->mb));
4201 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4202 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4203 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
4204 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
4205
4206 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4207 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
4208 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4209
4210 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4211 mcp->tov = MBX_TOV_SECONDS;
4212 rval = qla2x00_mailbox_command(vha, mcp);
4213
4214 /* Always copy back return mailbox values. */
4215 if (rval != QLA_SUCCESS) {
4216 ql_dbg(ql_dbg_mbx, vha, 0x1120,
4217 "mailbox command FAILED=0x%x, subcode=%x.\n",
4218 (mcp->mb[1] << 16) | mcp->mb[0],
4219 (mcp->mb[3] << 16) | mcp->mb[2]);
4220 } else {
4221 ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
4222 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4223 if (!ha->md_template_size) {
4224 ql_dbg(ql_dbg_mbx, vha, 0x1122,
4225 "Null template size obtained.\n");
4226 rval = QLA_FUNCTION_FAILED;
4227 }
4228 }
4229 return rval;
4230}
4231
4232int
4233qla82xx_md_get_template(scsi_qla_host_t *vha)
4234{
4235 struct qla_hw_data *ha = vha->hw;
4236 mbx_cmd_t mc;
4237 mbx_cmd_t *mcp = &mc;
4238 int rval = QLA_FUNCTION_FAILED;
4239
4240 ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
4241
4242 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4243 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
4244 if (!ha->md_tmplt_hdr) {
4245 ql_log(ql_log_warn, vha, 0x1124,
4246 "Unable to allocate memory for Minidump template.\n");
4247 return rval;
4248 }
4249
4250 memset(mcp->mb, 0 , sizeof(mcp->mb));
4251 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4252 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4253 mcp->mb[2] = LSW(RQST_TMPLT);
4254 mcp->mb[3] = MSW(RQST_TMPLT);
4255 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
4256 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
4257 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
4258 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
4259 mcp->mb[8] = LSW(ha->md_template_size);
4260 mcp->mb[9] = MSW(ha->md_template_size);
4261
4262 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4263 mcp->tov = MBX_TOV_SECONDS;
4264 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
4265 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4266 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4267 rval = qla2x00_mailbox_command(vha, mcp);
4268
4269 if (rval != QLA_SUCCESS) {
4270 ql_dbg(ql_dbg_mbx, vha, 0x1125,
4271 "mailbox command FAILED=0x%x, subcode=%x.\n",
4272 ((mcp->mb[1] << 16) | mcp->mb[0]),
4273 ((mcp->mb[3] << 16) | mcp->mb[2]));
4274 } else
4275 ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
4276 return rval;
4277}
4278
4279int
4280qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4281{
4282 int rval;
4283 struct qla_hw_data *ha = vha->hw;
4284 mbx_cmd_t mc;
4285 mbx_cmd_t *mcp = &mc;
4286
4287 if (!IS_QLA82XX(ha))
4288 return QLA_FUNCTION_FAILED;
4289
4290 ql_dbg(ql_dbg_mbx, vha, 0x1127,
4291 "Entered %s.\n", __func__);
4292
4293 memset(mcp, 0, sizeof(mbx_cmd_t));
4294 mcp->mb[0] = MBC_SET_LED_CONFIG;
4295 if (enable)
4296 mcp->mb[7] = 0xE;
4297 else
4298 mcp->mb[7] = 0xD;
4299
4300 mcp->out_mb = MBX_7|MBX_0;
4301 mcp->in_mb = MBX_0;
4302 mcp->tov = 30;
4303 mcp->flags = 0;
4304
4305 rval = qla2x00_mailbox_command(vha, mcp);
4306 if (rval != QLA_SUCCESS) {
4307 ql_dbg(ql_dbg_mbx, vha, 0x1128,
4308 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4309 } else {
4310 ql_dbg(ql_dbg_mbx, vha, 0x1129,
4311 "Done %s.\n", __func__);
4312 }
4313
4314 return rval;
4315}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 049807cda419..94bded5ddce4 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -7,6 +7,8 @@
7#include "qla_def.h" 7#include "qla_def.h"
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/pci.h> 9#include <linux/pci.h>
10#include <linux/ratelimit.h>
11#include <linux/vmalloc.h>
10#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
11 13
12#define MASK(n) ((1ULL<<(n))-1) 14#define MASK(n) ((1ULL<<(n))-1)
@@ -328,7 +330,7 @@ unsigned qla82xx_crb_hub_agt[64] = {
328}; 330};
329 331
330/* Device states */ 332/* Device states */
331char *qdev_state[] = { 333char *q_dev_state[] = {
332 "Unknown", 334 "Unknown",
333 "Cold", 335 "Cold",
334 "Initializing", 336 "Initializing",
@@ -339,6 +341,11 @@ char *qdev_state[] = {
339 "Quiescent", 341 "Quiescent",
340}; 342};
341 343
344char *qdev_state(uint32_t dev_state)
345{
346 return q_dev_state[dev_state];
347}
348
342/* 349/*
343 * In: 'off' is offset from CRB space in 128M pci map 350 * In: 'off' is offset from CRB space in 128M pci map
344 * Out: 'off' is 2M pci map addr 351 * Out: 'off' is 2M pci map addr
@@ -2355,9 +2362,13 @@ qla82xx_need_reset(struct qla_hw_data *ha)
2355 uint32_t drv_state; 2362 uint32_t drv_state;
2356 int rval; 2363 int rval;
2357 2364
2358 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2365 if (ha->flags.isp82xx_reset_owner)
2359 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2366 return 1;
2360 return rval; 2367 else {
2368 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2369 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2370 return rval;
2371 }
2361} 2372}
2362 2373
2363static inline void 2374static inline void
@@ -2374,8 +2385,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
2374 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2385 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2375 } 2386 }
2376 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2387 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2377 ql_log(ql_log_info, vha, 0x00bb, 2388 ql_dbg(ql_dbg_init, vha, 0x00bb,
2378 "drv_state = 0x%x.\n", drv_state); 2389 "drv_state = 0x%08x.\n", drv_state);
2379 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2390 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2380} 2391}
2381 2392
@@ -2598,7 +2609,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2598 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; 2609 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2599 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2610 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2600 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2611 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2601 *dsd_seg++ = cpu_to_le32(dsd_list_len); 2612 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
2602 } else { 2613 } else {
2603 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2614 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2604 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2615 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
@@ -3529,6 +3540,7 @@ static void
3529qla82xx_need_reset_handler(scsi_qla_host_t *vha) 3540qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3530{ 3541{
3531 uint32_t dev_state, drv_state, drv_active; 3542 uint32_t dev_state, drv_state, drv_active;
3543 uint32_t active_mask = 0;
3532 unsigned long reset_timeout; 3544 unsigned long reset_timeout;
3533 struct qla_hw_data *ha = vha->hw; 3545 struct qla_hw_data *ha = vha->hw;
3534 struct req_que *req = ha->req_q_map[0]; 3546 struct req_que *req = ha->req_q_map[0];
@@ -3541,15 +3553,32 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3541 qla82xx_idc_lock(ha); 3553 qla82xx_idc_lock(ha);
3542 } 3554 }
3543 3555
3544 qla82xx_set_rst_ready(ha); 3556 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3557 if (!ha->flags.isp82xx_reset_owner) {
3558 ql_dbg(ql_dbg_p3p, vha, 0xb028,
3559 "reset_acknowledged by 0x%x\n", ha->portnum);
3560 qla82xx_set_rst_ready(ha);
3561 } else {
3562 active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
3563 drv_active &= active_mask;
3564 ql_dbg(ql_dbg_p3p, vha, 0xb029,
3565 "active_mask: 0x%08x\n", active_mask);
3566 }
3545 3567
3546 /* wait for 10 seconds for reset ack from all functions */ 3568 /* wait for 10 seconds for reset ack from all functions */
3547 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 3569 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3548 3570
3549 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3571 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3550 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3572 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3573 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3551 3574
3552 while (drv_state != drv_active) { 3575 ql_dbg(ql_dbg_p3p, vha, 0xb02a,
3576 "drv_state: 0x%08x, drv_active: 0x%08x, "
3577 "dev_state: 0x%08x, active_mask: 0x%08x\n",
3578 drv_state, drv_active, dev_state, active_mask);
3579
3580 while (drv_state != drv_active &&
3581 dev_state != QLA82XX_DEV_INITIALIZING) {
3553 if (time_after_eq(jiffies, reset_timeout)) { 3582 if (time_after_eq(jiffies, reset_timeout)) {
3554 ql_log(ql_log_warn, vha, 0x00b5, 3583 ql_log(ql_log_warn, vha, 0x00b5,
3555 "Reset timeout.\n"); 3584 "Reset timeout.\n");
@@ -3560,23 +3589,87 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3560 qla82xx_idc_lock(ha); 3589 qla82xx_idc_lock(ha);
3561 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3590 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3562 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3591 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3592 if (ha->flags.isp82xx_reset_owner)
3593 drv_active &= active_mask;
3594 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3563 } 3595 }
3564 3596
3565 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3597 ql_dbg(ql_dbg_p3p, vha, 0xb02b,
3598 "drv_state: 0x%08x, drv_active: 0x%08x, "
3599 "dev_state: 0x%08x, active_mask: 0x%08x\n",
3600 drv_state, drv_active, dev_state, active_mask);
3601
3566 ql_log(ql_log_info, vha, 0x00b6, 3602 ql_log(ql_log_info, vha, 0x00b6,
3567 "Device state is 0x%x = %s.\n", 3603 "Device state is 0x%x = %s.\n",
3568 dev_state, 3604 dev_state,
3569 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3605 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3570 3606
3571 /* Force to DEV_COLD unless someone else is starting a reset */ 3607 /* Force to DEV_COLD unless someone else is starting a reset */
3572 if (dev_state != QLA82XX_DEV_INITIALIZING) { 3608 if (dev_state != QLA82XX_DEV_INITIALIZING &&
3609 dev_state != QLA82XX_DEV_COLD) {
3573 ql_log(ql_log_info, vha, 0x00b7, 3610 ql_log(ql_log_info, vha, 0x00b7,
3574 "HW State: COLD/RE-INIT.\n"); 3611 "HW State: COLD/RE-INIT.\n");
3575 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3612 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3613 if (ql2xmdenable) {
3614 if (qla82xx_md_collect(vha))
3615 ql_log(ql_log_warn, vha, 0xb02c,
3616 "Not able to collect minidump.\n");
3617 } else
3618 ql_log(ql_log_warn, vha, 0xb04f,
3619 "Minidump disabled.\n");
3576 } 3620 }
3577} 3621}
3578 3622
3579int 3623int
3624qla82xx_check_md_needed(scsi_qla_host_t *vha)
3625{
3626 struct qla_hw_data *ha = vha->hw;
3627 uint16_t fw_major_version, fw_minor_version, fw_subminor_version;
3628 int rval = QLA_SUCCESS;
3629
3630 fw_major_version = ha->fw_major_version;
3631 fw_minor_version = ha->fw_minor_version;
3632 fw_subminor_version = ha->fw_subminor_version;
3633
3634 rval = qla2x00_get_fw_version(vha, &ha->fw_major_version,
3635 &ha->fw_minor_version, &ha->fw_subminor_version,
3636 &ha->fw_attributes, &ha->fw_memory_size,
3637 ha->mpi_version, &ha->mpi_capabilities,
3638 ha->phy_version);
3639
3640 if (rval != QLA_SUCCESS)
3641 return rval;
3642
3643 if (ql2xmdenable) {
3644 if (!ha->fw_dumped) {
3645 if (fw_major_version != ha->fw_major_version ||
3646 fw_minor_version != ha->fw_minor_version ||
3647 fw_subminor_version != ha->fw_subminor_version) {
3648
3649 ql_log(ql_log_info, vha, 0xb02d,
3650 "Firmware version differs "
3651 "Previous version: %d:%d:%d - "
3652 "New version: %d:%d:%d\n",
3653 ha->fw_major_version,
3654 ha->fw_minor_version,
3655 ha->fw_subminor_version,
3656 fw_major_version, fw_minor_version,
3657 fw_subminor_version);
3658 /* Release MiniDump resources */
3659 qla82xx_md_free(vha);
3660 /* ALlocate MiniDump resources */
3661 qla82xx_md_prep(vha);
3662 } else
3663 ql_log(ql_log_info, vha, 0xb02e,
3664 "Firmware dump available to retrieve\n",
3665 vha->host_no);
3666 }
3667 }
3668 return rval;
3669}
3670
3671
3672int
3580qla82xx_check_fw_alive(scsi_qla_host_t *vha) 3673qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3581{ 3674{
3582 uint32_t fw_heartbeat_counter; 3675 uint32_t fw_heartbeat_counter;
@@ -3637,7 +3730,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3637 ql_log(ql_log_info, vha, 0x009b, 3730 ql_log(ql_log_info, vha, 0x009b,
3638 "Device state is 0x%x = %s.\n", 3731 "Device state is 0x%x = %s.\n",
3639 dev_state, 3732 dev_state,
3640 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3733 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3641 3734
3642 /* wait for 30 seconds for device to go ready */ 3735 /* wait for 30 seconds for device to go ready */
3643 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3736 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3659,26 +3752,33 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3659 ql_log(ql_log_info, vha, 0x009d, 3752 ql_log(ql_log_info, vha, 0x009d,
3660 "Device state is 0x%x = %s.\n", 3753 "Device state is 0x%x = %s.\n",
3661 dev_state, 3754 dev_state,
3662 dev_state < MAX_STATES ? qdev_state[dev_state] : 3755 dev_state < MAX_STATES ? qdev_state(dev_state) :
3663 "Unknown"); 3756 "Unknown");
3664 } 3757 }
3665 3758
3666 switch (dev_state) { 3759 switch (dev_state) {
3667 case QLA82XX_DEV_READY: 3760 case QLA82XX_DEV_READY:
3761 qla82xx_check_md_needed(vha);
3762 ha->flags.isp82xx_reset_owner = 0;
3668 goto exit; 3763 goto exit;
3669 case QLA82XX_DEV_COLD: 3764 case QLA82XX_DEV_COLD:
3670 rval = qla82xx_device_bootstrap(vha); 3765 rval = qla82xx_device_bootstrap(vha);
3671 goto exit; 3766 break;
3672 case QLA82XX_DEV_INITIALIZING: 3767 case QLA82XX_DEV_INITIALIZING:
3673 qla82xx_idc_unlock(ha); 3768 qla82xx_idc_unlock(ha);
3674 msleep(1000); 3769 msleep(1000);
3675 qla82xx_idc_lock(ha); 3770 qla82xx_idc_lock(ha);
3676 break; 3771 break;
3677 case QLA82XX_DEV_NEED_RESET: 3772 case QLA82XX_DEV_NEED_RESET:
3678 if (!ql2xdontresethba) 3773 if (!ql2xdontresethba)
3679 qla82xx_need_reset_handler(vha); 3774 qla82xx_need_reset_handler(vha);
3775 else {
3776 qla82xx_idc_unlock(ha);
3777 msleep(1000);
3778 qla82xx_idc_lock(ha);
3779 }
3680 dev_init_timeout = jiffies + 3780 dev_init_timeout = jiffies +
3681 (ha->nx_dev_init_timeout * HZ); 3781 (ha->nx_dev_init_timeout * HZ);
3682 break; 3782 break;
3683 case QLA82XX_DEV_NEED_QUIESCENT: 3783 case QLA82XX_DEV_NEED_QUIESCENT:
3684 qla82xx_need_qsnt_handler(vha); 3784 qla82xx_need_qsnt_handler(vha);
@@ -3791,6 +3891,28 @@ int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3791 return rval; 3891 return rval;
3792} 3892}
3793 3893
3894void
3895qla82xx_set_reset_owner(scsi_qla_host_t *vha)
3896{
3897 struct qla_hw_data *ha = vha->hw;
3898 uint32_t dev_state;
3899
3900 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3901 if (dev_state == QLA82XX_DEV_READY) {
3902 ql_log(ql_log_info, vha, 0xb02f,
3903 "HW State: NEED RESET\n");
3904 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3905 QLA82XX_DEV_NEED_RESET);
3906 ha->flags.isp82xx_reset_owner = 1;
3907 ql_dbg(ql_dbg_p3p, vha, 0xb030,
3908 "reset_owner is 0x%x\n", ha->portnum);
3909 } else
3910 ql_log(ql_log_info, vha, 0xb031,
3911 "Device state is 0x%x = %s.\n",
3912 dev_state,
3913 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3914}
3915
3794/* 3916/*
3795 * qla82xx_abort_isp 3917 * qla82xx_abort_isp
3796 * Resets ISP and aborts all outstanding commands. 3918 * Resets ISP and aborts all outstanding commands.
@@ -3806,7 +3928,6 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3806{ 3928{
3807 int rval; 3929 int rval;
3808 struct qla_hw_data *ha = vha->hw; 3930 struct qla_hw_data *ha = vha->hw;
3809 uint32_t dev_state;
3810 3931
3811 if (vha->device_flags & DFLG_DEV_FAILED) { 3932 if (vha->device_flags & DFLG_DEV_FAILED) {
3812 ql_log(ql_log_warn, vha, 0x8024, 3933 ql_log(ql_log_warn, vha, 0x8024,
@@ -3816,16 +3937,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3816 ha->flags.isp82xx_reset_hdlr_active = 1; 3937 ha->flags.isp82xx_reset_hdlr_active = 1;
3817 3938
3818 qla82xx_idc_lock(ha); 3939 qla82xx_idc_lock(ha);
3819 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3940 qla82xx_set_reset_owner(vha);
3820 if (dev_state == QLA82XX_DEV_READY) {
3821 ql_log(ql_log_info, vha, 0x8025,
3822 "HW State: NEED RESET.\n");
3823 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3824 QLA82XX_DEV_NEED_RESET);
3825 } else
3826 ql_log(ql_log_info, vha, 0x8026,
3827 "Hw State: %s.\n", dev_state < MAX_STATES ?
3828 qdev_state[dev_state] : "Unknown");
3829 qla82xx_idc_unlock(ha); 3941 qla82xx_idc_unlock(ha);
3830 3942
3831 rval = qla82xx_device_state_handler(vha); 3943 rval = qla82xx_device_state_handler(vha);
@@ -4016,3 +4128,803 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
4016 } 4128 }
4017 } 4129 }
4018} 4130}
4131
4132/* Minidump related functions */
4133int
4134qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
4135{
4136 uint32_t off_value, rval = 0;
4137
4138 WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
4139 (off & 0xFFFF0000));
4140
4141 /* Read back value to make sure write has gone through */
4142 RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
4143 off_value = (off & 0x0000FFFF);
4144
4145 if (flag)
4146 WRT_REG_DWORD((void *)
4147 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
4148 data);
4149 else
4150 rval = RD_REG_DWORD((void *)
4151 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
4152
4153 return rval;
4154}
4155
4156static int
4157qla82xx_minidump_process_control(scsi_qla_host_t *vha,
4158 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4159{
4160 struct qla_hw_data *ha = vha->hw;
4161 struct qla82xx_md_entry_crb *crb_entry;
4162 uint32_t read_value, opcode, poll_time;
4163 uint32_t addr, index, crb_addr;
4164 unsigned long wtime;
4165 struct qla82xx_md_template_hdr *tmplt_hdr;
4166 uint32_t rval = QLA_SUCCESS;
4167 int i;
4168
4169 tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4170 crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr;
4171 crb_addr = crb_entry->addr;
4172
4173 for (i = 0; i < crb_entry->op_count; i++) {
4174 opcode = crb_entry->crb_ctrl.opcode;
4175 if (opcode & QLA82XX_DBG_OPCODE_WR) {
4176 qla82xx_md_rw_32(ha, crb_addr,
4177 crb_entry->value_1, 1);
4178 opcode &= ~QLA82XX_DBG_OPCODE_WR;
4179 }
4180
4181 if (opcode & QLA82XX_DBG_OPCODE_RW) {
4182 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4183 qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4184 opcode &= ~QLA82XX_DBG_OPCODE_RW;
4185 }
4186
4187 if (opcode & QLA82XX_DBG_OPCODE_AND) {
4188 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4189 read_value &= crb_entry->value_2;
4190 opcode &= ~QLA82XX_DBG_OPCODE_AND;
4191 if (opcode & QLA82XX_DBG_OPCODE_OR) {
4192 read_value |= crb_entry->value_3;
4193 opcode &= ~QLA82XX_DBG_OPCODE_OR;
4194 }
4195 qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4196 }
4197
4198 if (opcode & QLA82XX_DBG_OPCODE_OR) {
4199 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4200 read_value |= crb_entry->value_3;
4201 qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4202 opcode &= ~QLA82XX_DBG_OPCODE_OR;
4203 }
4204
4205 if (opcode & QLA82XX_DBG_OPCODE_POLL) {
4206 poll_time = crb_entry->crb_strd.poll_timeout;
4207 wtime = jiffies + poll_time;
4208 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4209
4210 do {
4211 if ((read_value & crb_entry->value_2)
4212 == crb_entry->value_1)
4213 break;
4214 else if (time_after_eq(jiffies, wtime)) {
4215 /* capturing dump failed */
4216 rval = QLA_FUNCTION_FAILED;
4217 break;
4218 } else
4219 read_value = qla82xx_md_rw_32(ha,
4220 crb_addr, 0, 0);
4221 } while (1);
4222 opcode &= ~QLA82XX_DBG_OPCODE_POLL;
4223 }
4224
4225 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
4226 if (crb_entry->crb_strd.state_index_a) {
4227 index = crb_entry->crb_strd.state_index_a;
4228 addr = tmplt_hdr->saved_state_array[index];
4229 } else
4230 addr = crb_addr;
4231
4232 read_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4233 index = crb_entry->crb_ctrl.state_index_v;
4234 tmplt_hdr->saved_state_array[index] = read_value;
4235 opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
4236 }
4237
4238 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
4239 if (crb_entry->crb_strd.state_index_a) {
4240 index = crb_entry->crb_strd.state_index_a;
4241 addr = tmplt_hdr->saved_state_array[index];
4242 } else
4243 addr = crb_addr;
4244
4245 if (crb_entry->crb_ctrl.state_index_v) {
4246 index = crb_entry->crb_ctrl.state_index_v;
4247 read_value =
4248 tmplt_hdr->saved_state_array[index];
4249 } else
4250 read_value = crb_entry->value_1;
4251
4252 qla82xx_md_rw_32(ha, addr, read_value, 1);
4253 opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
4254 }
4255
4256 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
4257 index = crb_entry->crb_ctrl.state_index_v;
4258 read_value = tmplt_hdr->saved_state_array[index];
4259 read_value <<= crb_entry->crb_ctrl.shl;
4260 read_value >>= crb_entry->crb_ctrl.shr;
4261 if (crb_entry->value_2)
4262 read_value &= crb_entry->value_2;
4263 read_value |= crb_entry->value_3;
4264 read_value += crb_entry->value_1;
4265 tmplt_hdr->saved_state_array[index] = read_value;
4266 opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
4267 }
4268 crb_addr += crb_entry->crb_strd.addr_stride;
4269 }
4270 return rval;
4271}
4272
4273static void
4274qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
4275 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4276{
4277 struct qla_hw_data *ha = vha->hw;
4278 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
4279 struct qla82xx_md_entry_rdocm *ocm_hdr;
4280 uint32_t *data_ptr = *d_ptr;
4281
4282 ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
4283 r_addr = ocm_hdr->read_addr;
4284 r_stride = ocm_hdr->read_addr_stride;
4285 loop_cnt = ocm_hdr->op_count;
4286
4287 for (i = 0; i < loop_cnt; i++) {
4288 r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase));
4289 *data_ptr++ = cpu_to_le32(r_value);
4290 r_addr += r_stride;
4291 }
4292 *d_ptr = data_ptr;
4293}
4294
4295static void
4296qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
4297 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4298{
4299 struct qla_hw_data *ha = vha->hw;
4300 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
4301 struct qla82xx_md_entry_mux *mux_hdr;
4302 uint32_t *data_ptr = *d_ptr;
4303
4304 mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
4305 r_addr = mux_hdr->read_addr;
4306 s_addr = mux_hdr->select_addr;
4307 s_stride = mux_hdr->select_value_stride;
4308 s_value = mux_hdr->select_value;
4309 loop_cnt = mux_hdr->op_count;
4310
4311 for (i = 0; i < loop_cnt; i++) {
4312 qla82xx_md_rw_32(ha, s_addr, s_value, 1);
4313 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4314 *data_ptr++ = cpu_to_le32(s_value);
4315 *data_ptr++ = cpu_to_le32(r_value);
4316 s_value += s_stride;
4317 }
4318 *d_ptr = data_ptr;
4319}
4320
4321static void
4322qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
4323 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4324{
4325 struct qla_hw_data *ha = vha->hw;
4326 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
4327 struct qla82xx_md_entry_crb *crb_hdr;
4328 uint32_t *data_ptr = *d_ptr;
4329
4330 crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
4331 r_addr = crb_hdr->addr;
4332 r_stride = crb_hdr->crb_strd.addr_stride;
4333 loop_cnt = crb_hdr->op_count;
4334
4335 for (i = 0; i < loop_cnt; i++) {
4336 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4337 *data_ptr++ = cpu_to_le32(r_addr);
4338 *data_ptr++ = cpu_to_le32(r_value);
4339 r_addr += r_stride;
4340 }
4341 *d_ptr = data_ptr;
4342}
4343
4344static int
4345qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
4346 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4347{
4348 struct qla_hw_data *ha = vha->hw;
4349 uint32_t addr, r_addr, c_addr, t_r_addr;
4350 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
4351 unsigned long p_wait, w_time, p_mask;
4352 uint32_t c_value_w, c_value_r;
4353 struct qla82xx_md_entry_cache *cache_hdr;
4354 int rval = QLA_FUNCTION_FAILED;
4355 uint32_t *data_ptr = *d_ptr;
4356
4357 cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
4358 loop_count = cache_hdr->op_count;
4359 r_addr = cache_hdr->read_addr;
4360 c_addr = cache_hdr->control_addr;
4361 c_value_w = cache_hdr->cache_ctrl.write_value;
4362
4363 t_r_addr = cache_hdr->tag_reg_addr;
4364 t_value = cache_hdr->addr_ctrl.init_tag_value;
4365 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
4366 p_wait = cache_hdr->cache_ctrl.poll_wait;
4367 p_mask = cache_hdr->cache_ctrl.poll_mask;
4368
4369 for (i = 0; i < loop_count; i++) {
4370 qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
4371 if (c_value_w)
4372 qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
4373
4374 if (p_mask) {
4375 w_time = jiffies + p_wait;
4376 do {
4377 c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0);
4378 if ((c_value_r & p_mask) == 0)
4379 break;
4380 else if (time_after_eq(jiffies, w_time)) {
4381 /* capturing dump failed */
4382 ql_dbg(ql_dbg_p3p, vha, 0xb032,
4383 "c_value_r: 0x%x, poll_mask: 0x%lx, "
4384 "w_time: 0x%lx\n",
4385 c_value_r, p_mask, w_time);
4386 return rval;
4387 }
4388 } while (1);
4389 }
4390
4391 addr = r_addr;
4392 for (k = 0; k < r_cnt; k++) {
4393 r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4394 *data_ptr++ = cpu_to_le32(r_value);
4395 addr += cache_hdr->read_ctrl.read_addr_stride;
4396 }
4397 t_value += cache_hdr->addr_ctrl.tag_value_stride;
4398 }
4399 *d_ptr = data_ptr;
4400 return QLA_SUCCESS;
4401}
4402
4403static void
4404qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
4405 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4406{
4407 struct qla_hw_data *ha = vha->hw;
4408 uint32_t addr, r_addr, c_addr, t_r_addr;
4409 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
4410 uint32_t c_value_w;
4411 struct qla82xx_md_entry_cache *cache_hdr;
4412 uint32_t *data_ptr = *d_ptr;
4413
4414 cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
4415 loop_count = cache_hdr->op_count;
4416 r_addr = cache_hdr->read_addr;
4417 c_addr = cache_hdr->control_addr;
4418 c_value_w = cache_hdr->cache_ctrl.write_value;
4419
4420 t_r_addr = cache_hdr->tag_reg_addr;
4421 t_value = cache_hdr->addr_ctrl.init_tag_value;
4422 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
4423
4424 for (i = 0; i < loop_count; i++) {
4425 qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
4426 qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
4427 addr = r_addr;
4428 for (k = 0; k < r_cnt; k++) {
4429 r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4430 *data_ptr++ = cpu_to_le32(r_value);
4431 addr += cache_hdr->read_ctrl.read_addr_stride;
4432 }
4433 t_value += cache_hdr->addr_ctrl.tag_value_stride;
4434 }
4435 *d_ptr = data_ptr;
4436}
4437
4438static void
4439qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
4440 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4441{
4442 struct qla_hw_data *ha = vha->hw;
4443 uint32_t s_addr, r_addr;
4444 uint32_t r_stride, r_value, r_cnt, qid = 0;
4445 uint32_t i, k, loop_cnt;
4446 struct qla82xx_md_entry_queue *q_hdr;
4447 uint32_t *data_ptr = *d_ptr;
4448
4449 q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
4450 s_addr = q_hdr->select_addr;
4451 r_cnt = q_hdr->rd_strd.read_addr_cnt;
4452 r_stride = q_hdr->rd_strd.read_addr_stride;
4453 loop_cnt = q_hdr->op_count;
4454
4455 for (i = 0; i < loop_cnt; i++) {
4456 qla82xx_md_rw_32(ha, s_addr, qid, 1);
4457 r_addr = q_hdr->read_addr;
4458 for (k = 0; k < r_cnt; k++) {
4459 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4460 *data_ptr++ = cpu_to_le32(r_value);
4461 r_addr += r_stride;
4462 }
4463 qid += q_hdr->q_strd.queue_id_stride;
4464 }
4465 *d_ptr = data_ptr;
4466}
4467
4468static void
4469qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
4470 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4471{
4472 struct qla_hw_data *ha = vha->hw;
4473 uint32_t r_addr, r_value;
4474 uint32_t i, loop_cnt;
4475 struct qla82xx_md_entry_rdrom *rom_hdr;
4476 uint32_t *data_ptr = *d_ptr;
4477
4478 rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
4479 r_addr = rom_hdr->read_addr;
4480 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
4481
4482 for (i = 0; i < loop_cnt; i++) {
4483 qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
4484 (r_addr & 0xFFFF0000), 1);
4485 r_value = qla82xx_md_rw_32(ha,
4486 MD_DIRECT_ROM_READ_BASE +
4487 (r_addr & 0x0000FFFF), 0, 0);
4488 *data_ptr++ = cpu_to_le32(r_value);
4489 r_addr += sizeof(uint32_t);
4490 }
4491 *d_ptr = data_ptr;
4492}
4493
4494static int
4495qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
4496 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4497{
4498 struct qla_hw_data *ha = vha->hw;
4499 uint32_t r_addr, r_value, r_data;
4500 uint32_t i, j, loop_cnt;
4501 struct qla82xx_md_entry_rdmem *m_hdr;
4502 unsigned long flags;
4503 int rval = QLA_FUNCTION_FAILED;
4504 uint32_t *data_ptr = *d_ptr;
4505
4506 m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
4507 r_addr = m_hdr->read_addr;
4508 loop_cnt = m_hdr->read_data_size/16;
4509
4510 if (r_addr & 0xf) {
4511 ql_log(ql_log_warn, vha, 0xb033,
4512 "Read addr 0x%x not 16 bytes alligned\n", r_addr);
4513 return rval;
4514 }
4515
4516 if (m_hdr->read_data_size % 16) {
4517 ql_log(ql_log_warn, vha, 0xb034,
4518 "Read data[0x%x] not multiple of 16 bytes\n",
4519 m_hdr->read_data_size);
4520 return rval;
4521 }
4522
4523 ql_dbg(ql_dbg_p3p, vha, 0xb035,
4524 "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
4525 __func__, r_addr, m_hdr->read_data_size, loop_cnt);
4526
4527 write_lock_irqsave(&ha->hw_lock, flags);
4528 for (i = 0; i < loop_cnt; i++) {
4529 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
4530 r_value = 0;
4531 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
4532 r_value = MIU_TA_CTL_ENABLE;
4533 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4534 r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
4535 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4536
4537 for (j = 0; j < MAX_CTL_CHECK; j++) {
4538 r_value = qla82xx_md_rw_32(ha,
4539 MD_MIU_TEST_AGT_CTRL, 0, 0);
4540 if ((r_value & MIU_TA_CTL_BUSY) == 0)
4541 break;
4542 }
4543
4544 if (j >= MAX_CTL_CHECK) {
4545 printk_ratelimited(KERN_ERR
4546 "failed to read through agent\n");
4547 write_unlock_irqrestore(&ha->hw_lock, flags);
4548 return rval;
4549 }
4550
4551 for (j = 0; j < 4; j++) {
4552 r_data = qla82xx_md_rw_32(ha,
4553 MD_MIU_TEST_AGT_RDDATA[j], 0, 0);
4554 *data_ptr++ = cpu_to_le32(r_data);
4555 }
4556 r_addr += 16;
4557 }
4558 write_unlock_irqrestore(&ha->hw_lock, flags);
4559 *d_ptr = data_ptr;
4560 return QLA_SUCCESS;
4561}
4562
4563static int
4564qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
4565{
4566 struct qla_hw_data *ha = vha->hw;
4567 uint64_t chksum = 0;
4568 uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr;
4569 int count = ha->md_template_size/sizeof(uint32_t);
4570
4571 while (count-- > 0)
4572 chksum += *d_ptr++;
4573 while (chksum >> 32)
4574 chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
4575 return ~chksum;
4576}
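
The validation above is a 64-bit sum over every 32-bit word of the template (including the stored template_checksum field), folded back into 32 bits; an intact template folds to 0xFFFFFFFF, so the function returns 0. Below is a minimal user-space sketch of the same arithmetic, with a hypothetical md_template_checksum() helper and made-up data.

#include <stdint.h>
#include <stdio.h>

/* Same folding as qla82xx_validate_template_chksum(); 0 means intact. */
static uint32_t md_template_checksum(const uint32_t *words, size_t count)
{
	uint64_t chksum = 0;

	while (count-- > 0)
		chksum += *words++;
	while (chksum >> 32)
		chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
	return (uint32_t)~chksum;
}

int main(void)
{
	uint32_t tmpl[4] = { 0x1, 0x2, 0x3, 0x0 };

	/* Fix up the last word so the folded sum becomes 0xFFFFFFFF. */
	tmpl[3] = ~(tmpl[0] + tmpl[1] + tmpl[2]);
	printf("checksum result: 0x%x\n", md_template_checksum(tmpl, 4));
	return 0;
}
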
4577
4578static void
4579qla82xx_mark_entry_skipped(scsi_qla_host_t *vha,
4580 qla82xx_md_entry_hdr_t *entry_hdr, int index)
4581{
4582 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
4583 ql_dbg(ql_dbg_p3p, vha, 0xb036,
4584 "Skipping entry[%d]: "
4585 "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4586 index, entry_hdr->entry_type,
4587 entry_hdr->d_ctrl.entry_capture_mask);
4588}
4589
4590int
4591qla82xx_md_collect(scsi_qla_host_t *vha)
4592{
4593 struct qla_hw_data *ha = vha->hw;
4594 int no_entry_hdr = 0;
4595 qla82xx_md_entry_hdr_t *entry_hdr;
4596 struct qla82xx_md_template_hdr *tmplt_hdr;
4597 uint32_t *data_ptr;
4598 uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
4599 int i = 0, rval = QLA_FUNCTION_FAILED;
4600
4601 tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4602 data_ptr = (uint32_t *)ha->md_dump;
4603
4604 if (ha->fw_dumped) {
4605 ql_log(ql_log_info, vha, 0xb037,
4606 "Firmware dump available to retrive\n");
4607 goto md_failed;
4608 }
4609
4610 ha->fw_dumped = 0;
4611
4612 if (!ha->md_tmplt_hdr || !ha->md_dump) {
4613 ql_log(ql_log_warn, vha, 0xb038,
4614 "Memory not allocated for minidump capture\n");
4615 goto md_failed;
4616 }
4617
4618 if (qla82xx_validate_template_chksum(vha)) {
4619 ql_log(ql_log_info, vha, 0xb039,
4620 "Template checksum validation error\n");
4621 goto md_failed;
4622 }
4623
4624 no_entry_hdr = tmplt_hdr->num_of_entries;
4625 ql_dbg(ql_dbg_p3p, vha, 0xb03a,
4626 "No of entry headers in Template: 0x%x\n", no_entry_hdr);
4627
4628 ql_dbg(ql_dbg_p3p, vha, 0xb03b,
4629 "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
4630
4631 f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
4632
4633 /* Validate whether required debug level is set */
4634 if ((f_capture_mask & 0x3) != 0x3) {
4635 ql_log(ql_log_warn, vha, 0xb03c,
4636 "Minimum required capture mask[0x%x] level not set\n",
4637 f_capture_mask);
4638 goto md_failed;
4639 }
4640 tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
4641
4642 tmplt_hdr->driver_info[0] = vha->host_no;
4643 tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) |
4644 (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) |
4645 QLA_DRIVER_BETA_VER;
4646
4647 total_data_size = ha->md_dump_size;
4648
4649 ql_log(ql_log_info, vha, 0xb03d,
4650 "Total minidump data_size 0x%x to be captured\n", total_data_size);
4651
4652 /* Check whether template obtained is valid */
4653 if (tmplt_hdr->entry_type != QLA82XX_TLHDR) {
4654 ql_log(ql_log_warn, vha, 0xb04e,
4655 "Bad template header entry type: 0x%x obtained\n",
4656 tmplt_hdr->entry_type);
4657 goto md_failed;
4658 }
4659
4660 entry_hdr = (qla82xx_md_entry_hdr_t *)
4661 (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
4662
4663 /* Walk through the entry headers */
4664 for (i = 0; i < no_entry_hdr; i++) {
4665
4666 if (data_collected > total_data_size) {
4667 ql_log(ql_log_warn, vha, 0xb03e,
4668 "More MiniDump data collected: [0x%x]\n",
4669 data_collected);
4670 goto md_failed;
4671 }
4672
4673 if (!(entry_hdr->d_ctrl.entry_capture_mask &
4674 ql2xmdcapmask)) {
4675 entry_hdr->d_ctrl.driver_flags |=
4676 QLA82XX_DBG_SKIPPED_FLAG;
4677 ql_dbg(ql_dbg_p3p, vha, 0xb03f,
4678 "Skipping entry[%d]: "
4679 "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4680 i, entry_hdr->entry_type,
4681 entry_hdr->d_ctrl.entry_capture_mask);
4682 goto skip_nxt_entry;
4683 }
4684
4685 ql_dbg(ql_dbg_p3p, vha, 0xb040,
4686 "[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
4687 "entry_type: 0x%x, captrue_mask: 0x%x\n",
4688 __func__, i, data_ptr, entry_hdr,
4689 entry_hdr->entry_type,
4690 entry_hdr->d_ctrl.entry_capture_mask);
4691
4692 ql_dbg(ql_dbg_p3p, vha, 0xb041,
4693 "Data collected: [0x%x], Dump size left:[0x%x]\n",
4694 data_collected, (ha->md_dump_size - data_collected));
4695
4696 /* Decode the entry type and take
4697 * required action to capture debug data */
4698 switch (entry_hdr->entry_type) {
4699 case QLA82XX_RDEND:
4700 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4701 break;
4702 case QLA82XX_CNTRL:
4703 rval = qla82xx_minidump_process_control(vha,
4704 entry_hdr, &data_ptr);
4705 if (rval != QLA_SUCCESS) {
4706 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4707 goto md_failed;
4708 }
4709 break;
4710 case QLA82XX_RDCRB:
4711 qla82xx_minidump_process_rdcrb(vha,
4712 entry_hdr, &data_ptr);
4713 break;
4714 case QLA82XX_RDMEM:
4715 rval = qla82xx_minidump_process_rdmem(vha,
4716 entry_hdr, &data_ptr);
4717 if (rval != QLA_SUCCESS) {
4718 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4719 goto md_failed;
4720 }
4721 break;
4722 case QLA82XX_BOARD:
4723 case QLA82XX_RDROM:
4724 qla82xx_minidump_process_rdrom(vha,
4725 entry_hdr, &data_ptr);
4726 break;
4727 case QLA82XX_L2DTG:
4728 case QLA82XX_L2ITG:
4729 case QLA82XX_L2DAT:
4730 case QLA82XX_L2INS:
4731 rval = qla82xx_minidump_process_l2tag(vha,
4732 entry_hdr, &data_ptr);
4733 if (rval != QLA_SUCCESS) {
4734 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4735 goto md_failed;
4736 }
4737 break;
4738 case QLA82XX_L1DAT:
4739 case QLA82XX_L1INS:
4740 qla82xx_minidump_process_l1cache(vha,
4741 entry_hdr, &data_ptr);
4742 break;
4743 case QLA82XX_RDOCM:
4744 qla82xx_minidump_process_rdocm(vha,
4745 entry_hdr, &data_ptr);
4746 break;
4747 case QLA82XX_RDMUX:
4748 qla82xx_minidump_process_rdmux(vha,
4749 entry_hdr, &data_ptr);
4750 break;
4751 case QLA82XX_QUEUE:
4752 qla82xx_minidump_process_queue(vha,
4753 entry_hdr, &data_ptr);
4754 break;
4755 case QLA82XX_RDNOP:
4756 default:
4757 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4758 break;
4759 }
4760
4761 ql_dbg(ql_dbg_p3p, vha, 0xb042,
4762 "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr);
4763
4764 data_collected = (uint8_t *)data_ptr -
4765 (uint8_t *)ha->md_dump;
4766skip_nxt_entry:
4767 entry_hdr = (qla82xx_md_entry_hdr_t *)
4768 (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
4769 }
4770
4771 if (data_collected != total_data_size) {
4772 ql_log(ql_log_warn, vha, 0xb043,
4773 "MiniDump data mismatch: data collected: [0x%x], "
4774 "total_data_size: [0x%x]\n",
4775 data_collected, total_data_size);
4776 goto md_failed;
4777 }
4778
4779 ql_log(ql_log_info, vha, 0xb044,
4780 "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
4781 vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
4782 ha->fw_dumped = 1;
4783 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
4784
4785md_failed:
4786 return rval;
4787}
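
Note that the entry walk above depends only on entry_size in each header, which is what lets unknown entry types be skipped cleanly. A stand-alone sketch of that traversal follows, with the header trimmed to the two fields the walk needs (names here are illustrative, not the driver's).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Trimmed-down entry header: only the fields the walk itself needs. */
struct md_entry {
	uint32_t entry_type;
	uint32_t entry_size;	/* total size of this entry, header included */
};

static void walk_entries(const uint8_t *buf, uint32_t first_offset,
    uint32_t num_entries)
{
	const uint8_t *p = buf + first_offset;
	uint32_t i;

	for (i = 0; i < num_entries; i++) {
		struct md_entry e;

		memcpy(&e, p, sizeof(e));
		printf("entry[%u]: type %u, size %u\n", i, e.entry_type,
		    e.entry_size);
		p += e.entry_size;	/* same stride rule as qla82xx_md_collect() */
	}
}

int main(void)
{
	/* Two fake entries, 12 and 8 bytes long, starting at offset 0. */
	uint8_t buf[32] = { 0 };
	struct md_entry e0 = { 1, 12 }, e1 = { 255, 8 };

	memcpy(buf, &e0, sizeof(e0));
	memcpy(buf + 12, &e1, sizeof(e1));
	walk_entries(buf, 0, 2);
	return 0;
}
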
4788
4789int
4790qla82xx_md_alloc(scsi_qla_host_t *vha)
4791{
4792 struct qla_hw_data *ha = vha->hw;
4793 int i, k;
4794 struct qla82xx_md_template_hdr *tmplt_hdr;
4795
4796 tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4797
4798 if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) {
4799 ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF;
4800 ql_log(ql_log_info, vha, 0xb045,
4801 "Forcing driver capture mask to firmware default capture mask: 0x%x.\n",
4802 ql2xmdcapmask);
4803 }
4804
4805 for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) {
4806 if (i & ql2xmdcapmask)
4807 ha->md_dump_size += tmplt_hdr->capture_size_array[k];
4808 }
4809
4810 if (ha->md_dump) {
4811 ql_log(ql_log_warn, vha, 0xb046,
4812 "Firmware dump previously allocated.\n");
4813 return 1;
4814 }
4815
4816 ha->md_dump = vmalloc(ha->md_dump_size);
4817 if (ha->md_dump == NULL) {
4818 ql_log(ql_log_warn, vha, 0xb047,
4819 "Unable to allocate memory for Minidump size "
4820 "(0x%x).\n", ha->md_dump_size);
4821 return 1;
4822 }
4823 return 0;
4824}
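
For reference, the capture-mask loop above maps bit 0x2 of ql2xmdcapmask to capture_size_array[1], 0x4 to [2], and so on up to 0x80, and sums the selected slots into md_dump_size. A small illustration of that accumulation with made-up array contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Per-level capture sizes; values here are made up for illustration. */
	uint32_t capture_size_array[8] = { 0, 0x1000, 0x2000, 0x4000,
					   0x8000, 0x10000, 0x20000, 0x40000 };
	uint32_t capmask = 0x1F;	/* same default as ql2xmdcapmask */
	uint32_t dump_size = 0;
	int i, k;

	/* Same walk as qla82xx_md_alloc(): bit 0x2 -> slot 1, 0x4 -> slot 2, ... */
	for (i = 0x2, k = 1; i & 0xFF; i <<= 1, k++) {
		if (i & capmask)
			dump_size += capture_size_array[k];
	}
	printf("md_dump_size = 0x%x\n", dump_size);
	return 0;
}
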
4825
4826void
4827qla82xx_md_free(scsi_qla_host_t *vha)
4828{
4829 struct qla_hw_data *ha = vha->hw;
4830
4831 /* Release the template header allocated */
4832 if (ha->md_tmplt_hdr) {
4833 ql_log(ql_log_info, vha, 0xb048,
4834 "Free MiniDump template: %p, size (%d KB)\n",
4835 ha->md_tmplt_hdr, ha->md_template_size / 1024);
4836 dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
4837 ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4838 ha->md_tmplt_hdr = NULL;
4839 }
4840
4841 /* Release the template data buffer allocated */
4842 if (ha->md_dump) {
4843 ql_log(ql_log_info, vha, 0xb049,
4844 "Free MiniDump memory: %p, size (%d KB)\n",
4845 ha->md_dump, ha->md_dump_size / 1024);
4846 vfree(ha->md_dump);
4847 ha->md_dump_size = 0;
4848 ha->md_dump = NULL;
4849 }
4850}
4851
4852void
4853qla82xx_md_prep(scsi_qla_host_t *vha)
4854{
4855 struct qla_hw_data *ha = vha->hw;
4856 int rval;
4857
4858 /* Get Minidump template size */
4859 rval = qla82xx_md_get_template_size(vha);
4860 if (rval == QLA_SUCCESS) {
4861 ql_log(ql_log_info, vha, 0xb04a,
4862 "MiniDump Template size obtained (%d KB)\n",
4863 ha->md_template_size / 1024);
4864
4865 /* Get Minidump template */
4866 rval = qla82xx_md_get_template(vha);
4867 if (rval == QLA_SUCCESS) {
4868 ql_dbg(ql_dbg_p3p, vha, 0xb04b,
4869 "MiniDump Template obtained\n");
4870
4871 /* Allocate memory for minidump */
4872 rval = qla82xx_md_alloc(vha);
4873 if (rval == QLA_SUCCESS)
4874 ql_log(ql_log_info, vha, 0xb04c,
4875 "MiniDump memory allocated (%d KB)\n",
4876 ha->md_dump_size / 1024);
4877 else {
4878 ql_log(ql_log_info, vha, 0xb04d,
4879 "Free MiniDump template: %p, size: (%d KB)\n",
4880 ha->md_tmplt_hdr,
4881 ha->md_template_size / 1024);
4882 dma_free_coherent(&ha->pdev->dev,
4883 ha->md_template_size,
4884 ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4885 ha->md_tmplt_hdr = NULL;
4886 }
4887
4888 }
4889 }
4890}
4891
4892int
4893qla82xx_beacon_on(struct scsi_qla_host *vha)
4894{
4895 int rval;
4896 struct qla_hw_data *ha = vha->hw;
4897
4898 qla82xx_idc_lock(ha);
4899 rval = qla82xx_mbx_beacon_ctl(vha, 1);
4900
4901 if (rval) {
4902 ql_log(ql_log_warn, vha, 0xb050,
4903 "mbx set led config failed in %s\n", __func__);
4904 goto exit;
4905 }
4906 ha->beacon_blink_led = 1;
4907exit:
4908 qla82xx_idc_unlock(ha);
4909 return rval;
4910}
4911
4912int
4913qla82xx_beacon_off(struct scsi_qla_host *vha)
4914{
4915 int rval;
4916 struct qla_hw_data *ha = vha->hw;
4917
4918 qla82xx_idc_lock(ha);
4919 rval = qla82xx_mbx_beacon_ctl(vha, 0);
4920
4921 if (rval) {
4922 ql_log(ql_log_warn, vha, 0xb051,
4923 "mbx set led config failed in %s\n", __func__);
4924 goto exit;
4925 }
4926 ha->beacon_blink_led = 0;
4927exit:
4928 qla82xx_idc_unlock(ha);
4929 return rval;
4930}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 8a21832c6693..57820c199bc2 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -484,8 +484,6 @@
484#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) 484#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
485#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) 485#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
486#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) 486#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
487
488#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
489#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) 487#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
490 488
491#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 489#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
@@ -890,6 +888,7 @@ struct ct6_dsd {
890}; 888};
891 889
892#define MBC_TOGGLE_INTERRUPT 0x10 890#define MBC_TOGGLE_INTERRUPT 0x10
891#define MBC_SET_LED_CONFIG 0x125
893 892
894/* Flash offset */ 893/* Flash offset */
895#define FLT_REG_BOOTLOAD_82XX 0x72 894#define FLT_REG_BOOTLOAD_82XX 0x72
@@ -922,4 +921,256 @@ struct ct6_dsd {
922#define M25P_INSTR_DP 0xb9 921#define M25P_INSTR_DP 0xb9
923#define M25P_INSTR_RES 0xab 922#define M25P_INSTR_RES 0xab
924 923
924/* Minidump related */
925
926/*
927 * Version of the template
928 * 4 Bytes
929 * X.Major.Minor.RELEASE
930 */
931#define QLA82XX_MINIDUMP_VERSION 0x10101
932
933/*
934 * Entry Type Defines
935 */
936#define QLA82XX_RDNOP 0
937#define QLA82XX_RDCRB 1
938#define QLA82XX_RDMUX 2
939#define QLA82XX_QUEUE 3
940#define QLA82XX_BOARD 4
941#define QLA82XX_RDSRE 5
942#define QLA82XX_RDOCM 6
943#define QLA82XX_CACHE 10
944#define QLA82XX_L1DAT 11
945#define QLA82XX_L1INS 12
946#define QLA82XX_L2DTG 21
947#define QLA82XX_L2ITG 22
948#define QLA82XX_L2DAT 23
949#define QLA82XX_L2INS 24
950#define QLA82XX_RDROM 71
951#define QLA82XX_RDMEM 72
952#define QLA82XX_CNTRL 98
953#define QLA82XX_TLHDR 99
954#define QLA82XX_RDEND 255
955
956/*
957 * Opcodes for Control Entries.
958 * These Flags are bit fields.
959 */
960#define QLA82XX_DBG_OPCODE_WR 0x01
961#define QLA82XX_DBG_OPCODE_RW 0x02
962#define QLA82XX_DBG_OPCODE_AND 0x04
963#define QLA82XX_DBG_OPCODE_OR 0x08
964#define QLA82XX_DBG_OPCODE_POLL 0x10
965#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
966#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
967#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
968
969/*
970 * Template Header and Entry Header definitions start here.
971 */
972
973/*
974 * Template Header
975 * Parts of the template header can be modified by the driver.
976 * These include the saved_state_array, capture_debug_level, driver_timestamp
977 */
978
979#define QLA82XX_DBG_STATE_ARRAY_LEN 16
980#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
981#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
982
983/*
984 * Driver Flags
985 */
986#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
987#define QLA82XX_DEFAULT_CAP_MASK 0xFF /* default capture mask */
988
989struct qla82xx_md_template_hdr {
990 uint32_t entry_type;
991 uint32_t first_entry_offset;
992 uint32_t size_of_template;
993 uint32_t capture_debug_level;
994
995 uint32_t num_of_entries;
996 uint32_t version;
997 uint32_t driver_timestamp;
998 uint32_t template_checksum;
999
1000 uint32_t driver_capture_mask;
1001 uint32_t driver_info[3];
1002
1003 uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
1004 uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
1005
1006 /* markers_array used to capture some special locations on board */
1007 uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN];
1008 uint32_t num_of_free_entries; /* For internal use */
1009 uint32_t free_entry_offset; /* For internal use */
1010 uint32_t total_table_size; /* For internal use */
1011 uint32_t bkup_table_offset; /* For internal use */
1012} __packed;
1013
1014/*
1015 * Entry Header: Common to All Entry Types
1016 */
1017
1018/*
1019 * Driver Code is for driver to write some info about the entry.
1020 * Currently not used.
1021 */
1022typedef struct qla82xx_md_entry_hdr {
1023 uint32_t entry_type;
1024 uint32_t entry_size;
1025 uint32_t entry_capture_size;
1026 struct {
1027 uint8_t entry_capture_mask;
1028 uint8_t entry_code;
1029 uint8_t driver_code;
1030 uint8_t driver_flags;
1031 } d_ctrl;
1032} __packed qla82xx_md_entry_hdr_t;
1033
1034/*
1035 * Read CRB entry header
1036 */
1037struct qla82xx_md_entry_crb {
1038 qla82xx_md_entry_hdr_t h;
1039 uint32_t addr;
1040 struct {
1041 uint8_t addr_stride;
1042 uint8_t state_index_a;
1043 uint16_t poll_timeout;
1044 } crb_strd;
1045
1046 uint32_t data_size;
1047 uint32_t op_count;
1048
1049 struct {
1050 uint8_t opcode;
1051 uint8_t state_index_v;
1052 uint8_t shl;
1053 uint8_t shr;
1054 } crb_ctrl;
1055
1056 uint32_t value_1;
1057 uint32_t value_2;
1058 uint32_t value_3;
1059} __packed;
1060
1061/*
1062 * Cache entry header
1063 */
1064struct qla82xx_md_entry_cache {
1065 qla82xx_md_entry_hdr_t h;
1066
1067 uint32_t tag_reg_addr;
1068 struct {
1069 uint16_t tag_value_stride;
1070 uint16_t init_tag_value;
1071 } addr_ctrl;
1072
1073 uint32_t data_size;
1074 uint32_t op_count;
1075
1076 uint32_t control_addr;
1077 struct {
1078 uint16_t write_value;
1079 uint8_t poll_mask;
1080 uint8_t poll_wait;
1081 } cache_ctrl;
1082
1083 uint32_t read_addr;
1084 struct {
1085 uint8_t read_addr_stride;
1086 uint8_t read_addr_cnt;
1087 uint16_t rsvd_1;
1088 } read_ctrl;
1089} __packed;
1090
1091/*
1092 * Read OCM
1093 */
1094struct qla82xx_md_entry_rdocm {
1095 qla82xx_md_entry_hdr_t h;
1096
1097 uint32_t rsvd_0;
1098 uint32_t rsvd_1;
1099 uint32_t data_size;
1100 uint32_t op_count;
1101
1102 uint32_t rsvd_2;
1103 uint32_t rsvd_3;
1104 uint32_t read_addr;
1105 uint32_t read_addr_stride;
1106 uint32_t read_addr_cntrl;
1107} __packed;
1108
1109/*
1110 * Read Memory
1111 */
1112struct qla82xx_md_entry_rdmem {
1113 qla82xx_md_entry_hdr_t h;
1114 uint32_t rsvd[6];
1115 uint32_t read_addr;
1116 uint32_t read_data_size;
1117} __packed;
1118
1119/*
1120 * Read ROM
1121 */
1122struct qla82xx_md_entry_rdrom {
1123 qla82xx_md_entry_hdr_t h;
1124 uint32_t rsvd[6];
1125 uint32_t read_addr;
1126 uint32_t read_data_size;
1127} __packed;
1128
1129struct qla82xx_md_entry_mux {
1130 qla82xx_md_entry_hdr_t h;
1131
1132 uint32_t select_addr;
1133 uint32_t rsvd_0;
1134 uint32_t data_size;
1135 uint32_t op_count;
1136
1137 uint32_t select_value;
1138 uint32_t select_value_stride;
1139 uint32_t read_addr;
1140 uint32_t rsvd_1;
1141} __packed;
1142
1143struct qla82xx_md_entry_queue {
1144 qla82xx_md_entry_hdr_t h;
1145
1146 uint32_t select_addr;
1147 struct {
1148 uint16_t queue_id_stride;
1149 uint16_t rsvd_0;
1150 } q_strd;
1151
1152 uint32_t data_size;
1153 uint32_t op_count;
1154 uint32_t rsvd_1;
1155 uint32_t rsvd_2;
1156
1157 uint32_t read_addr;
1158 struct {
1159 uint8_t read_addr_stride;
1160 uint8_t read_addr_cnt;
1161 uint16_t rsvd_3;
1162 } rd_strd;
1163} __packed;
1164
1165#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
1166#define RQST_TMPLT_SIZE 0x0
1167#define RQST_TMPLT 0x1
1168#define MD_DIRECT_ROM_WINDOW 0x42110030
1169#define MD_DIRECT_ROM_READ_BASE 0x42150000
1170#define MD_MIU_TEST_AGT_CTRL 0x41000090
1171#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
1172#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1173
1174static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
1175 0x410000B8, 0x410000BC };
925#endif 1176#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1e69527f1e4e..fd14c7bfc626 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -143,7 +143,7 @@ MODULE_PARM_DESC(ql2xmultique_tag,
143 "Set it to 1 to turn on the cpu affinity."); 143 "Set it to 1 to turn on the cpu affinity.");
144 144
145int ql2xfwloadbin; 145int ql2xfwloadbin;
146module_param(ql2xfwloadbin, int, S_IRUGO); 146module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
147MODULE_PARM_DESC(ql2xfwloadbin, 147MODULE_PARM_DESC(ql2xfwloadbin,
148 "Option to specify location from which to load ISP firmware:.\n" 148 "Option to specify location from which to load ISP firmware:.\n"
149 " 2 -- load firmware via the request_firmware() (hotplug).\n" 149 " 2 -- load firmware via the request_firmware() (hotplug).\n"
@@ -158,11 +158,11 @@ MODULE_PARM_DESC(ql2xetsenable,
158 "Default is 0 - skip ETS enablement."); 158 "Default is 0 - skip ETS enablement.");
159 159
160int ql2xdbwr = 1; 160int ql2xdbwr = 1;
161module_param(ql2xdbwr, int, S_IRUGO); 161module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
162MODULE_PARM_DESC(ql2xdbwr, 162MODULE_PARM_DESC(ql2xdbwr,
163 "Option to specify scheme for request queue posting.\n" 163 "Option to specify scheme for request queue posting.\n"
164 " 0 -- Regular doorbell.\n" 164 " 0 -- Regular doorbell.\n"
165 " 1 -- CAMRAM doorbell (faster).\n"); 165 " 1 -- CAMRAM doorbell (faster).\n");
166 166
167int ql2xtargetreset = 1; 167int ql2xtargetreset = 1;
168module_param(ql2xtargetreset, int, S_IRUGO); 168module_param(ql2xtargetreset, int, S_IRUGO);
@@ -183,11 +183,11 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
183 "Default is 0 - Issue TM IOCBs via mailbox mechanism."); 183 "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
184 184
185int ql2xdontresethba; 185int ql2xdontresethba;
186module_param(ql2xdontresethba, int, S_IRUGO); 186module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
187MODULE_PARM_DESC(ql2xdontresethba, 187MODULE_PARM_DESC(ql2xdontresethba,
188 "Option to specify reset behaviour.\n" 188 "Option to specify reset behaviour.\n"
189 " 0 (Default) -- Reset on failure.\n" 189 " 0 (Default) -- Reset on failure.\n"
190 " 1 -- Do not reset on failure.\n"); 190 " 1 -- Do not reset on failure.\n");
191 191
192uint ql2xmaxlun = MAX_LUNS; 192uint ql2xmaxlun = MAX_LUNS;
193module_param(ql2xmaxlun, uint, S_IRUGO); 193module_param(ql2xmaxlun, uint, S_IRUGO);
@@ -195,6 +195,19 @@ MODULE_PARM_DESC(ql2xmaxlun,
195 "Defines the maximum LU number to register with the SCSI " 195 "Defines the maximum LU number to register with the SCSI "
196 "midlayer. Default is 65535."); 196 "midlayer. Default is 65535.");
197 197
198int ql2xmdcapmask = 0x1F;
199module_param(ql2xmdcapmask, int, S_IRUGO);
200MODULE_PARM_DESC(ql2xmdcapmask,
201 "Set the Minidump driver capture mask level. "
202 "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
203
204int ql2xmdenable;
205module_param(ql2xmdenable, int, S_IRUGO);
206MODULE_PARM_DESC(ql2xmdenable,
207 "Enable/disable MiniDump. "
208 "0 (Default) - MiniDump disabled. "
209 "1 - MiniDump enabled.");
210
198/* 211/*
199 * SCSI host template entry points 212 * SCSI host template entry points
200 */ 213 */
@@ -1750,9 +1763,9 @@ static struct isp_operations qla82xx_isp_ops = {
1750 .read_nvram = qla24xx_read_nvram_data, 1763 .read_nvram = qla24xx_read_nvram_data,
1751 .write_nvram = qla24xx_write_nvram_data, 1764 .write_nvram = qla24xx_write_nvram_data,
1752 .fw_dump = qla24xx_fw_dump, 1765 .fw_dump = qla24xx_fw_dump,
1753 .beacon_on = qla24xx_beacon_on, 1766 .beacon_on = qla82xx_beacon_on,
1754 .beacon_off = qla24xx_beacon_off, 1767 .beacon_off = qla82xx_beacon_off,
1755 .beacon_blink = qla24xx_beacon_blink, 1768 .beacon_blink = NULL,
1756 .read_optrom = qla82xx_read_optrom_data, 1769 .read_optrom = qla82xx_read_optrom_data,
1757 .write_optrom = qla82xx_write_optrom_data, 1770 .write_optrom = qla82xx_write_optrom_data,
1758 .get_flash_version = qla24xx_get_flash_version, 1771 .get_flash_version = qla24xx_get_flash_version,
@@ -2670,6 +2683,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2670 2683
2671 qla2x00_mem_free(ha); 2684 qla2x00_mem_free(ha);
2672 2685
2686 qla82xx_md_free(vha);
2687
2673 qla2x00_free_queues(ha); 2688 qla2x00_free_queues(ha);
2674} 2689}
2675 2690
@@ -3903,8 +3918,11 @@ qla2x00_timer(scsi_qla_host_t *vha)
3903 3918
3904 /* Check if beacon LED needs to be blinked for physical host only */ 3919 /* Check if beacon LED needs to be blinked for physical host only */
3905 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 3920 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
3906 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 3921 /* There is no beacon_blink function for ISP82xx */
3907 start_dpc++; 3922 if (!IS_QLA82XX(ha)) {
3923 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
3924 start_dpc++;
3925 }
3908 } 3926 }
3909 3927
3910 /* Process any deferred work. */ 3928 /* Process any deferred work. */