Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c     183
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c      441
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c       79
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c       16
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h        5
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c       371
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c     849
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h     8
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c     120
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c      663
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c     1091
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c      160
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c       556
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c       730
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c      275
15 files changed, 2941 insertions, 2606 deletions
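
Note: the hunks below apply one mechanical conversion throughout the driver. The legacy qla_printk() and DEBUG2(qla_printk(...)) calls, keyed off the qla_hw_data pointer (ha), are replaced by the ql_log()/ql_dbg() interface, which takes a severity level or debug-category mask, the scsi_qla_host_t pointer (vha), and a unique message ID. The following sketch simply restates the pattern using calls that appear in the qla_attr.c hunks below; the specific message IDs (0x705b, 0x705f) belong to those instances and are not general constants.

    /* Before: compile-time gated debug print, keyed off struct qla_hw_data *ha. */
    DEBUG2(qla_printk(KERN_INFO, ha,
        "Firmware dump not supported for ISP82xx\n"));

    /* After: runtime-filterable debug message.  ql_dbg() takes a debug-category
     * mask (here ql_dbg_user), the scsi_qla_host_t pointer, and a unique ID. */
    ql_dbg(ql_dbg_user, vha, 0x705b,
        "Firmware dump not supported for ISP82xx\n");

    /* Unconditional log messages use ql_log() with a severity level instead. */
    ql_log(ql_log_warn, vha, 0x705f,
        "HBA not online, failing NVRAM update.\n");
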
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 532313e0725e..7836eb01c7fc 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
42 int reading; 42 int reading;
43 43
44 if (IS_QLA82XX(ha)) { 44 if (IS_QLA82XX(ha)) {
45 DEBUG2(qla_printk(KERN_INFO, ha, 45 ql_dbg(ql_dbg_user, vha, 0x705b,
46 "Firmware dump not supported for ISP82xx\n")); 46 "Firmware dump not supported for ISP82xx\n");
47 return count; 47 return count;
48 } 48 }
49 49
@@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
56 if (!ha->fw_dump_reading) 56 if (!ha->fw_dump_reading)
57 break; 57 break;
58 58
59 qla_printk(KERN_INFO, ha, 59 ql_log(ql_log_info, vha, 0x705d,
60 "Firmware dump cleared on (%ld).\n", vha->host_no); 60 "Firmware dump cleared on (%ld).\n", vha->host_no);
61 61
62 ha->fw_dump_reading = 0; 62 ha->fw_dump_reading = 0;
@@ -66,7 +66,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
66 if (ha->fw_dumped && !ha->fw_dump_reading) { 66 if (ha->fw_dumped && !ha->fw_dump_reading) {
67 ha->fw_dump_reading = 1; 67 ha->fw_dump_reading = 1;
68 68
69 qla_printk(KERN_INFO, ha, 69 ql_log(ql_log_info, vha, 0x705e,
70 "Raw firmware dump ready for read on (%ld).\n", 70 "Raw firmware dump ready for read on (%ld).\n",
71 vha->host_no); 71 vha->host_no);
72 } 72 }
@@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
148 } 148 }
149 149
150 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 150 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
151 qla_printk(KERN_WARNING, ha, 151 ql_log(ql_log_warn, vha, 0x705f,
152 "HBA not online, failing NVRAM update.\n"); 152 "HBA not online, failing NVRAM update.\n");
153 return -EAGAIN; 153 return -EAGAIN;
154 } 154 }
@@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
158 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base, 158 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
159 count); 159 count);
160 160
161 ql_dbg(ql_dbg_user, vha, 0x7060,
162 "Setting ISP_ABORT_NEEDED\n");
161 /* NVRAM settings take effect immediately. */ 163 /* NVRAM settings take effect immediately. */
162 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 164 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
163 qla2xxx_wake_dpc(vha); 165 qla2xxx_wake_dpc(vha);
@@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
255 257
256 ha->optrom_state = QLA_SWAITING; 258 ha->optrom_state = QLA_SWAITING;
257 259
258 DEBUG2(qla_printk(KERN_INFO, ha, 260 ql_dbg(ql_dbg_user, vha, 0x7061,
259 "Freeing flash region allocation -- 0x%x bytes.\n", 261 "Freeing flash region allocation -- 0x%x bytes.\n",
260 ha->optrom_region_size)); 262 ha->optrom_region_size);
261 263
262 vfree(ha->optrom_buffer); 264 vfree(ha->optrom_buffer);
263 ha->optrom_buffer = NULL; 265 ha->optrom_buffer = NULL;
@@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
273 ha->optrom_state = QLA_SREADING; 275 ha->optrom_state = QLA_SREADING;
274 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 276 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
275 if (ha->optrom_buffer == NULL) { 277 if (ha->optrom_buffer == NULL) {
276 qla_printk(KERN_WARNING, ha, 278 ql_log(ql_log_warn, vha, 0x7062,
277 "Unable to allocate memory for optrom retrieval " 279 "Unable to allocate memory for optrom retrieval "
278 "(%x).\n", ha->optrom_region_size); 280 "(%x).\n", ha->optrom_region_size);
279 281
@@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
282 } 284 }
283 285
284 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 286 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
285 qla_printk(KERN_WARNING, ha, 287 ql_log(ql_log_warn, vha, 0x7063,
286 "HBA not online, failing NVRAM update.\n"); 288 "HBA not online, failing NVRAM update.\n");
287 return -EAGAIN; 289 return -EAGAIN;
288 } 290 }
289 291
290 DEBUG2(qla_printk(KERN_INFO, ha, 292 ql_dbg(ql_dbg_user, vha, 0x7064,
291 "Reading flash region -- 0x%x/0x%x.\n", 293 "Reading flash region -- 0x%x/0x%x.\n",
292 ha->optrom_region_start, ha->optrom_region_size)); 294 ha->optrom_region_start, ha->optrom_region_size);
293 295
294 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 296 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
295 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 297 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
@@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
328 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha)) 330 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
329 valid = 1; 331 valid = 1;
330 if (!valid) { 332 if (!valid) {
331 qla_printk(KERN_WARNING, ha, 333 ql_log(ql_log_warn, vha, 0x7065,
332 "Invalid start region 0x%x/0x%x.\n", start, size); 334 "Invalid start region 0x%x/0x%x.\n", start, size);
333 return -EINVAL; 335 return -EINVAL;
334 } 336 }
@@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
340 ha->optrom_state = QLA_SWRITING; 342 ha->optrom_state = QLA_SWRITING;
341 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 343 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
342 if (ha->optrom_buffer == NULL) { 344 if (ha->optrom_buffer == NULL) {
343 qla_printk(KERN_WARNING, ha, 345 ql_log(ql_log_warn, vha, 0x7066,
344 "Unable to allocate memory for optrom update " 346 "Unable to allocate memory for optrom update "
345 "(%x).\n", ha->optrom_region_size); 347 "(%x)\n", ha->optrom_region_size);
346 348
347 ha->optrom_state = QLA_SWAITING; 349 ha->optrom_state = QLA_SWAITING;
348 return count; 350 return count;
349 } 351 }
350 352
351 DEBUG2(qla_printk(KERN_INFO, ha, 353 ql_dbg(ql_dbg_user, vha, 0x7067,
352 "Staging flash region write -- 0x%x/0x%x.\n", 354 "Staging flash region write -- 0x%x/0x%x.\n",
353 ha->optrom_region_start, ha->optrom_region_size)); 355 ha->optrom_region_start, ha->optrom_region_size);
354 356
355 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 357 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
356 break; 358 break;
@@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
359 break; 361 break;
360 362
361 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 363 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
362 qla_printk(KERN_WARNING, ha, 364 ql_log(ql_log_warn, vha, 0x7068,
363 "HBA not online, failing flash update.\n"); 365 "HBA not online, failing flash update.\n");
364 return -EAGAIN; 366 return -EAGAIN;
365 } 367 }
366 368
367 DEBUG2(qla_printk(KERN_INFO, ha, 369 ql_dbg(ql_dbg_user, vha, 0x7069,
368 "Writing flash region -- 0x%x/0x%x.\n", 370 "Writing flash region -- 0x%x/0x%x.\n",
369 ha->optrom_region_start, ha->optrom_region_size)); 371 ha->optrom_region_start, ha->optrom_region_size);
370 372
371 ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 373 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
372 ha->optrom_region_start, ha->optrom_region_size); 374 ha->optrom_region_start, ha->optrom_region_size);
@@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
425 return 0; 427 return 0;
426 428
427 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 429 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
428 qla_printk(KERN_WARNING, ha, 430 ql_log(ql_log_warn, vha, 0x706a,
429 "HBA not online, failing VPD update.\n"); 431 "HBA not online, failing VPD update.\n");
430 return -EAGAIN; 432 return -EAGAIN;
431 } 433 }
@@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
440 442
441 tmp_data = vmalloc(256); 443 tmp_data = vmalloc(256);
442 if (!tmp_data) { 444 if (!tmp_data) {
443 qla_printk(KERN_WARNING, ha, 445 ql_log(ql_log_warn, vha, 0x706b,
444 "Unable to allocate memory for VPD information update.\n"); 446 "Unable to allocate memory for VPD information update.\n");
445 goto done; 447 goto done;
446 } 448 }
@@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
480 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 482 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
481 &ha->sfp_data_dma); 483 &ha->sfp_data_dma);
482 if (!ha->sfp_data) { 484 if (!ha->sfp_data) {
483 qla_printk(KERN_WARNING, ha, 485 ql_log(ql_log_warn, vha, 0x706c,
484 "Unable to allocate memory for SFP read-data.\n"); 486 "Unable to allocate memory for SFP read-data.\n");
485 return 0; 487 return 0;
486 } 488 }
@@ -499,9 +501,10 @@ do_read:
499 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, 501 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
500 addr, offset, SFP_BLOCK_SIZE, 0); 502 addr, offset, SFP_BLOCK_SIZE, 0);
501 if (rval != QLA_SUCCESS) { 503 if (rval != QLA_SUCCESS) {
502 qla_printk(KERN_WARNING, ha, 504 ql_log(ql_log_warn, vha, 0x706d,
503 "Unable to read SFP data (%x/%x/%x).\n", rval, 505 "Unable to read SFP data (%x/%x/%x).\n", rval,
504 addr, offset); 506 addr, offset);
507
505 count = 0; 508 count = 0;
506 break; 509 break;
507 } 510 }
@@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
538 type = simple_strtol(buf, NULL, 10); 541 type = simple_strtol(buf, NULL, 10);
539 switch (type) { 542 switch (type) {
540 case 0x2025c: 543 case 0x2025c:
541 qla_printk(KERN_INFO, ha, 544 ql_log(ql_log_info, vha, 0x706e,
542 "Issuing ISP reset on (%ld).\n", vha->host_no); 545 "Issuing ISP reset.\n");
543 546
544 scsi_block_requests(vha->host); 547 scsi_block_requests(vha->host);
545 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 548 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
551 if (!IS_QLA81XX(ha)) 554 if (!IS_QLA81XX(ha))
552 break; 555 break;
553 556
554 qla_printk(KERN_INFO, ha, 557 ql_log(ql_log_info, vha, 0x706f,
555 "Issuing MPI reset on (%ld).\n", vha->host_no); 558 "Issuing MPI reset.\n");
556 559
557 /* Make sure FC side is not in reset */ 560 /* Make sure FC side is not in reset */
558 qla2x00_wait_for_hba_online(vha); 561 qla2x00_wait_for_hba_online(vha);
@@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
560 /* Issue MPI reset */ 563 /* Issue MPI reset */
561 scsi_block_requests(vha->host); 564 scsi_block_requests(vha->host);
562 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) 565 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
563 qla_printk(KERN_WARNING, ha, 566 ql_log(ql_log_warn, vha, 0x7070,
564 "MPI reset failed on (%ld).\n", vha->host_no); 567 "MPI reset failed.\n");
565 scsi_unblock_requests(vha->host); 568 scsi_unblock_requests(vha->host);
566 break; 569 break;
567 case 0x2025e: 570 case 0x2025e:
568 if (!IS_QLA82XX(ha) || vha != base_vha) { 571 if (!IS_QLA82XX(ha) || vha != base_vha) {
569 qla_printk(KERN_INFO, ha, 572 ql_log(ql_log_info, vha, 0x7071,
570 "FCoE ctx reset not supported for host%ld.\n", 573 "FCoE ctx reset no supported.\n");
571 vha->host_no);
572 return count; 574 return count;
573 } 575 }
574 576
575 qla_printk(KERN_INFO, ha, 577 ql_log(ql_log_info, vha, 0x7072,
576 "Issuing FCoE CTX reset on host%ld.\n", vha->host_no); 578 "Issuing FCoE ctx reset.\n");
577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 579 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
578 qla2xxx_wake_dpc(vha); 580 qla2xxx_wake_dpc(vha);
579 qla2x00_wait_for_fcoe_ctx_reset(vha); 581 qla2x00_wait_for_fcoe_ctx_reset(vha);
@@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
611 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 613 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
612 &ha->edc_data_dma); 614 &ha->edc_data_dma);
613 if (!ha->edc_data) { 615 if (!ha->edc_data) {
614 DEBUG2(qla_printk(KERN_INFO, ha, 616 ql_log(ql_log_warn, vha, 0x7073,
615 "Unable to allocate memory for EDC write.\n")); 617 "Unable to allocate memory for EDC write.\n");
616 return 0; 618 return 0;
617 } 619 }
618 } 620 }
@@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
631 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data, 633 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
632 dev, adr, len, opt); 634 dev, adr, len, opt);
633 if (rval != QLA_SUCCESS) { 635 if (rval != QLA_SUCCESS) {
634 DEBUG2(qla_printk(KERN_INFO, ha, 636 ql_log(ql_log_warn, vha, 0x7074,
635 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n", 637 "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
636 rval, dev, adr, opt, len, buf[8])); 638 rval, dev, adr, opt, len, buf[8]);
637 return 0; 639 return 0;
638 } 640 }
639 641
@@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
669 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 671 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
670 &ha->edc_data_dma); 672 &ha->edc_data_dma);
671 if (!ha->edc_data) { 673 if (!ha->edc_data) {
672 DEBUG2(qla_printk(KERN_INFO, ha, 674 ql_log(ql_log_warn, vha, 0x708c,
673 "Unable to allocate memory for EDC status.\n")); 675 "Unable to allocate memory for EDC status.\n");
674 return 0; 676 return 0;
675 } 677 }
676 } 678 }
@@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
688 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data, 690 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
689 dev, adr, len, opt); 691 dev, adr, len, opt);
690 if (rval != QLA_SUCCESS) { 692 if (rval != QLA_SUCCESS) {
691 DEBUG2(qla_printk(KERN_INFO, ha, 693 ql_log(ql_log_info, vha, 0x7075,
692 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n", 694 "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
693 rval, dev, adr, opt, len)); 695 rval, dev, adr, opt, len);
694 return 0; 696 return 0;
695 } 697 }
696 698
@@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
749 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 751 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
750 &ha->xgmac_data_dma, GFP_KERNEL); 752 &ha->xgmac_data_dma, GFP_KERNEL);
751 if (!ha->xgmac_data) { 753 if (!ha->xgmac_data) {
752 qla_printk(KERN_WARNING, ha, 754 ql_log(ql_log_warn, vha, 0x7076,
753 "Unable to allocate memory for XGMAC read-data.\n"); 755 "Unable to allocate memory for XGMAC read-data.\n");
754 return 0; 756 return 0;
755 } 757 }
@@ -761,7 +763,7 @@ do_read:
761 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, 763 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
762 XGMAC_DATA_SIZE, &actual_size); 764 XGMAC_DATA_SIZE, &actual_size);
763 if (rval != QLA_SUCCESS) { 765 if (rval != QLA_SUCCESS) {
764 qla_printk(KERN_WARNING, ha, 766 ql_log(ql_log_warn, vha, 0x7077,
765 "Unable to read XGMAC data (%x).\n", rval); 767 "Unable to read XGMAC data (%x).\n", rval);
766 count = 0; 768 count = 0;
767 } 769 }
@@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
801 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 803 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
802 &ha->dcbx_tlv_dma, GFP_KERNEL); 804 &ha->dcbx_tlv_dma, GFP_KERNEL);
803 if (!ha->dcbx_tlv) { 805 if (!ha->dcbx_tlv) {
804 qla_printk(KERN_WARNING, ha, 806 ql_log(ql_log_warn, vha, 0x7078,
805 "Unable to allocate memory for DCBX TLV read-data.\n"); 807 "Unable to allocate memory for DCBX TLV read-data.\n");
806 return 0; 808 return 0;
807 } 809 }
@@ -813,8 +815,8 @@ do_read:
813 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, 815 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
814 DCBX_TLV_DATA_SIZE); 816 DCBX_TLV_DATA_SIZE);
815 if (rval != QLA_SUCCESS) { 817 if (rval != QLA_SUCCESS) {
816 qla_printk(KERN_WARNING, ha, 818 ql_log(ql_log_warn, vha, 0x7079,
817 "Unable to read DCBX TLV data (%x).\n", rval); 819 "Unable to read DCBX TLV (%x).\n", rval);
818 count = 0; 820 count = 0;
819 } 821 }
820 822
@@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
869 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 871 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
870 iter->attr); 872 iter->attr);
871 if (ret) 873 if (ret)
872 qla_printk(KERN_INFO, vha->hw, 874 ql_log(ql_log_warn, vha, 0x00f3,
873 "Unable to create sysfs %s binary attribute " 875 "Unable to create sysfs %s binary attribute (%d).\n",
874 "(%d).\n", iter->name, ret); 876 iter->name, ret);
877 else
878 ql_dbg(ql_dbg_init, vha, 0x00f4,
879 "Successfully created sysfs %s binary attribure.\n",
880 iter->name);
875 } 881 }
876} 882}
877 883
@@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1126 return -EPERM; 1132 return -EPERM;
1127 1133
1128 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 1134 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1129 qla_printk(KERN_WARNING, ha, 1135 ql_log(ql_log_warn, vha, 0x707a,
1130 "Abort ISP active -- ignoring beacon request.\n"); 1136 "Abort ISP active -- ignoring beacon request.\n");
1131 return -EBUSY; 1137 return -EBUSY;
1132 } 1138 }
@@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev,
1322 temp = frac = 0; 1328 temp = frac = 0;
1323 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1329 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1324 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 1330 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1325 DEBUG2_3_11(printk(KERN_WARNING 1331 ql_log(ql_log_warn, vha, 0x707b,
1326 "%s(%ld): isp reset in progress.\n", 1332 "ISP reset active.\n");
1327 __func__, vha->host_no));
1328 else if (!vha->hw->flags.eeh_busy) 1333 else if (!vha->hw->flags.eeh_busy)
1329 rval = qla2x00_get_thermal_temp(vha, &temp, &frac); 1334 rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1330 if (rval != QLA_SUCCESS) 1335 if (rval != QLA_SUCCESS)
@@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1343 1348
1344 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1349 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1345 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 1350 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1346 DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n", 1351 ql_log(ql_log_warn, vha, 0x707c,
1347 __func__, vha->host_no)); 1352 "ISP reset active.\n");
1348 else if (!vha->hw->flags.eeh_busy) 1353 else if (!vha->hw->flags.eeh_busy)
1349 rval = qla2x00_get_firmware_state(vha, state); 1354 rval = qla2x00_get_firmware_state(vha, state);
1350 if (rval != QLA_SUCCESS) 1355 if (rval != QLA_SUCCESS)
@@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1645 1650
1646 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1651 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1647 if (stats == NULL) { 1652 if (stats == NULL) {
1648 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1653 ql_log(ql_log_warn, vha, 0x707d,
1649 __func__, base_vha->host_no)); 1654 "Failed to allocate memory for stats.\n");
1650 goto done; 1655 goto done;
1651 } 1656 }
1652 memset(stats, 0, DMA_POOL_SIZE); 1657 memset(stats, 0, DMA_POOL_SIZE);
@@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1746 1751
1747 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1752 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1748 if (ret) { 1753 if (ret) {
1749 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, " 1754 ql_log(ql_log_warn, vha, 0x707e,
1750 "status %x\n", ret)); 1755 "Vport sanity check failed, status %x\n", ret);
1751 return (ret); 1756 return (ret);
1752 } 1757 }
1753 1758
1754 vha = qla24xx_create_vhost(fc_vport); 1759 vha = qla24xx_create_vhost(fc_vport);
1755 if (vha == NULL) { 1760 if (vha == NULL) {
1756 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n", 1761 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
1757 vha));
1758 return FC_VPORT_FAILED; 1762 return FC_VPORT_FAILED;
1759 } 1763 }
1760 if (disable) { 1764 if (disable) {
@@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1764 atomic_set(&vha->vp_state, VP_FAILED); 1768 atomic_set(&vha->vp_state, VP_FAILED);
1765 1769
1766 /* ready to create vport */ 1770 /* ready to create vport */
1767 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n", 1771 ql_log(ql_log_info, vha, 0x7080,
1768 vha->vp_idx); 1772 "VP entry id %d assigned.\n", vha->vp_idx);
1769 1773
1770 /* initialized vport states */ 1774 /* initialized vport states */
1771 atomic_set(&vha->loop_state, LOOP_DOWN); 1775 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1775,8 +1779,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1775 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || 1779 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1776 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 1780 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1777 /* Don't retry or attempt login of this virtual port */ 1781 /* Don't retry or attempt login of this virtual port */
1778 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n", 1782 ql_dbg(ql_dbg_user, vha, 0x7081,
1779 base_vha->host_no)); 1783 "Vport loop state is not UP.\n");
1780 atomic_set(&vha->loop_state, LOOP_DEAD); 1784 atomic_set(&vha->loop_state, LOOP_DEAD);
1781 if (!disable) 1785 if (!disable)
1782 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1785,9 +1789,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1785 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 1789 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
1786 if (ha->fw_attributes & BIT_4) { 1790 if (ha->fw_attributes & BIT_4) {
1787 vha->flags.difdix_supported = 1; 1791 vha->flags.difdix_supported = 1;
1788 DEBUG18(qla_printk(KERN_INFO, ha, 1792 ql_dbg(ql_dbg_user, vha, 0x7082,
1789 "Registering for DIF/DIX type 1 and 3" 1793 "Registered for DIF/DIX type 1 and 3 protection.\n");
1790 " protection.\n"));
1791 scsi_host_set_prot(vha->host, 1794 scsi_host_set_prot(vha->host,
1792 SHOST_DIF_TYPE1_PROTECTION 1795 SHOST_DIF_TYPE1_PROTECTION
1793 | SHOST_DIF_TYPE2_PROTECTION 1796 | SHOST_DIF_TYPE2_PROTECTION
@@ -1802,8 +1805,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1802 1805
1803 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, 1806 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1804 &ha->pdev->dev)) { 1807 &ha->pdev->dev)) {
1805 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", 1808 ql_dbg(ql_dbg_user, vha, 0x7083,
1806 vha->host_no, vha->vp_idx)); 1809 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
1807 goto vport_create_failed_2; 1810 goto vport_create_failed_2;
1808 } 1811 }
1809 1812
@@ -1820,6 +1823,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1820 1823
1821 if (ha->flags.cpu_affinity_enabled) { 1824 if (ha->flags.cpu_affinity_enabled) {
1822 req = ha->req_q_map[1]; 1825 req = ha->req_q_map[1];
1826 ql_dbg(ql_dbg_multiq, vha, 0xc000,
1827 "Request queue %p attached with "
1828 "VP[%d], cpu affinity =%d\n",
1829 req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
1823 goto vport_queue; 1830 goto vport_queue;
1824 } else if (ql2xmaxqueues == 1 || !ha->npiv_info) 1831 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1825 goto vport_queue; 1832 goto vport_queue;
@@ -1836,13 +1843,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1836 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, 1843 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1837 qos); 1844 qos);
1838 if (!ret) 1845 if (!ret)
1839 qla_printk(KERN_WARNING, ha, 1846 ql_log(ql_log_warn, vha, 0x7084,
1840 "Can't create request queue for vp_idx:%d\n", 1847 "Can't create request queue for VP[%d]\n",
1841 vha->vp_idx); 1848 vha->vp_idx);
1842 else { 1849 else {
1843 DEBUG2(qla_printk(KERN_INFO, ha, 1850 ql_dbg(ql_dbg_multiq, vha, 0xc001,
1844 "Request Que:%d (QoS: %d) created for vp_idx:%d\n", 1851 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1845 ret, qos, vha->vp_idx)); 1852 ret, qos, vha->vp_idx);
1853 ql_dbg(ql_dbg_user, vha, 0x7085,
1854 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1855 ret, qos, vha->vp_idx);
1846 req = ha->req_q_map[ret]; 1856 req = ha->req_q_map[ret];
1847 } 1857 }
1848 } 1858 }
@@ -1882,12 +1892,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1882 1892
1883 if (vha->timer_active) { 1893 if (vha->timer_active) {
1884 qla2x00_vp_stop_timer(vha); 1894 qla2x00_vp_stop_timer(vha);
1885 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" 1895 ql_dbg(ql_dbg_user, vha, 0x7086,
1886 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); 1896 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1887 } 1897 }
1888 1898
1889 /* No pending activities shall be there on the vha now */ 1899 /* No pending activities shall be there on the vha now */
1890 DEBUG(msleep(random32()%10)); /* Just to see if something falls on 1900 if (ql2xextended_error_logging & ql_dbg_user)
1901 msleep(random32()%10); /* Just to see if something falls on
1891 * the net we have placed below */ 1902 * the net we have placed below */
1892 1903
1893 BUG_ON(atomic_read(&vha->vref_count)); 1904 BUG_ON(atomic_read(&vha->vref_count));
@@ -1901,12 +1912,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1901 1912
1902 if (vha->req->id && !ha->flags.cpu_affinity_enabled) { 1913 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1903 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) 1914 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1904 qla_printk(KERN_WARNING, ha, 1915 ql_log(ql_log_warn, vha, 0x7087,
1905 "Queue delete failed.\n"); 1916 "Queue delete failed.\n");
1906 } 1917 }
1907 1918
1908 scsi_host_put(vha->host); 1919 scsi_host_put(vha->host);
1909 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); 1920 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
1910 return 0; 1921 return 0;
1911} 1922}
1912 1923
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8c10e2c4928e..07d1767cd26b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -36,7 +36,8 @@ done:
36} 36}
37 37
38int 38int
39qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) 39qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
40 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
40{ 41{
41 int i, ret, num_valid; 42 int i, ret, num_valid;
42 uint8_t *bcode; 43 uint8_t *bcode;
@@ -51,18 +52,17 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
51 52
52 if (bcode_val == 0xFFFFFFFF) { 53 if (bcode_val == 0xFFFFFFFF) {
53 /* No FCP Priority config data in flash */ 54 /* No FCP Priority config data in flash */
54 DEBUG2(printk(KERN_INFO 55 ql_dbg(ql_dbg_user, vha, 0x7051,
55 "%s: No FCP priority config data.\n", 56 "No FCP Priority config data.\n");
56 __func__));
57 return 0; 57 return 0;
58 } 58 }
59 59
60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' || 60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
61 bcode[3] != 'S') { 61 bcode[3] != 'S') {
62 /* Invalid FCP priority data header*/ 62 /* Invalid FCP priority data header*/
63 DEBUG2(printk(KERN_ERR 63 ql_dbg(ql_dbg_user, vha, 0x7052,
64 "%s: Invalid FCP Priority data header. bcode=0x%x\n", 64 "Invalid FCP Priority data header. bcode=0x%x.\n",
65 __func__, bcode_val)); 65 bcode_val);
66 return 0; 66 return 0;
67 } 67 }
68 if (flag != 1) 68 if (flag != 1)
@@ -77,15 +77,14 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
77 77
78 if (num_valid == 0) { 78 if (num_valid == 0) {
79 /* No valid FCP priority data entries */ 79 /* No valid FCP priority data entries */
80 DEBUG2(printk(KERN_ERR 80 ql_dbg(ql_dbg_user, vha, 0x7053,
81 "%s: No valid FCP Priority data entries.\n", 81 "No valid FCP Priority data entries.\n");
82 __func__));
83 ret = 0; 82 ret = 0;
84 } else { 83 } else {
85 /* FCP priority data is valid */ 84 /* FCP priority data is valid */
86 DEBUG2(printk(KERN_INFO 85 ql_dbg(ql_dbg_user, vha, 0x7054,
87 "%s: Valid FCP priority data. num entries = %d\n", 86 "Valid FCP priority data. num entries = %d.\n",
88 __func__, num_valid)); 87 num_valid);
89 } 88 }
90 89
91 return ret; 90 return ret;
@@ -182,10 +181,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
182 if (!ha->fcp_prio_cfg) { 181 if (!ha->fcp_prio_cfg) {
183 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 182 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
184 if (!ha->fcp_prio_cfg) { 183 if (!ha->fcp_prio_cfg) {
185 qla_printk(KERN_WARNING, ha, 184 ql_log(ql_log_warn, vha, 0x7050,
186 "Unable to allocate memory " 185 "Unable to allocate memory for fcp prio "
187 "for fcp prio config data (%x).\n", 186 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
188 FCP_PRIO_CFG_SIZE);
189 bsg_job->reply->result = (DID_ERROR << 16); 187 bsg_job->reply->result = (DID_ERROR << 16);
190 ret = -ENOMEM; 188 ret = -ENOMEM;
191 goto exit_fcp_prio_cfg; 189 goto exit_fcp_prio_cfg;
@@ -198,9 +196,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
198 FCP_PRIO_CFG_SIZE); 196 FCP_PRIO_CFG_SIZE);
199 197
200 /* validate fcp priority data */ 198 /* validate fcp priority data */
201 if (!qla24xx_fcp_prio_cfg_valid( 199
202 (struct qla_fcp_prio_cfg *) 200 if (!qla24xx_fcp_prio_cfg_valid(vha,
203 ha->fcp_prio_cfg, 1)) { 201 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
204 bsg_job->reply->result = (DID_ERROR << 16); 202 bsg_job->reply->result = (DID_ERROR << 16);
205 ret = -EINVAL; 203 ret = -EINVAL;
206 /* If buffer was invalidatic int 204 /* If buffer was invalidatic int
@@ -256,9 +254,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
256 254
257 /* pass through is supported only for ISP 4Gb or higher */ 255 /* pass through is supported only for ISP 4Gb or higher */
258 if (!IS_FWI2_CAPABLE(ha)) { 256 if (!IS_FWI2_CAPABLE(ha)) {
259 DEBUG2(qla_printk(KERN_INFO, ha, 257 ql_dbg(ql_dbg_user, vha, 0x7001,
260 "scsi(%ld):ELS passthru not supported for ISP23xx based " 258 "ELS passthru not supported for ISP23xx based adapters.\n");
261 "adapters\n", vha->host_no));
262 rval = -EPERM; 259 rval = -EPERM;
263 goto done; 260 goto done;
264 } 261 }
@@ -266,11 +263,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
266 /* Multiple SG's are not supported for ELS requests */ 263 /* Multiple SG's are not supported for ELS requests */
267 if (bsg_job->request_payload.sg_cnt > 1 || 264 if (bsg_job->request_payload.sg_cnt > 1 ||
268 bsg_job->reply_payload.sg_cnt > 1) { 265 bsg_job->reply_payload.sg_cnt > 1) {
269 DEBUG2(printk(KERN_INFO 266 ql_dbg(ql_dbg_user, vha, 0x7002,
270 "multiple SG's are not supported for ELS requests" 267 "Multiple SG's are not suppored for ELS requests, "
271 " [request_sg_cnt: %x reply_sg_cnt: %x]\n", 268 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
272 bsg_job->request_payload.sg_cnt, 269 bsg_job->request_payload.sg_cnt,
273 bsg_job->reply_payload.sg_cnt)); 270 bsg_job->reply_payload.sg_cnt);
274 rval = -EPERM; 271 rval = -EPERM;
275 goto done; 272 goto done;
276 } 273 }
@@ -281,9 +278,9 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
281 * if not perform fabric login 278 * if not perform fabric login
282 */ 279 */
283 if (qla2x00_fabric_login(vha, fcport, &nextlid)) { 280 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
284 DEBUG2(qla_printk(KERN_WARNING, ha, 281 ql_dbg(ql_dbg_user, vha, 0x7003,
285 "failed to login port %06X for ELS passthru\n", 282 "Failed to login port %06X for ELS passthru.\n",
286 fcport->d_id.b24)); 283 fcport->d_id.b24);
287 rval = -EIO; 284 rval = -EIO;
288 goto done; 285 goto done;
289 } 286 }
@@ -314,8 +311,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
314 } 311 }
315 312
316 if (!vha->flags.online) { 313 if (!vha->flags.online) {
317 DEBUG2(qla_printk(KERN_WARNING, ha, 314 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
318 "host not online\n"));
319 rval = -EIO; 315 rval = -EIO;
320 goto done; 316 goto done;
321 } 317 }
@@ -337,12 +333,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
337 333
338 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 334 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
339 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 335 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
340 DEBUG2(printk(KERN_INFO 336 ql_log(ql_log_warn, vha, 0x7008,
341 "dma mapping resulted in different sg counts \ 337 "dma mapping resulted in different sg counts, "
342 [request_sg_cnt: %x dma_request_sg_cnt: %x\ 338 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
343 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 339 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
344 bsg_job->request_payload.sg_cnt, req_sg_cnt, 340 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
345 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
346 rval = -EAGAIN; 341 rval = -EAGAIN;
347 goto done_unmap_sg; 342 goto done_unmap_sg;
348 } 343 }
@@ -363,15 +358,16 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
363 "bsg_els_rpt" : "bsg_els_hst"); 358 "bsg_els_rpt" : "bsg_els_hst");
364 els->u.bsg_job = bsg_job; 359 els->u.bsg_job = bsg_job;
365 360
366 DEBUG2(qla_printk(KERN_INFO, ha, 361 ql_dbg(ql_dbg_user, vha, 0x700a,
367 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " 362 "bsg rqst type: %s els type: %x - loop-id=%x "
368 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, 363 "portid=%-2x%02x%02x.\n", type,
369 bsg_job->request->rqst_data.h_els.command_code, 364 bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
370 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 365 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
371 fcport->d_id.b.al_pa));
372 366
373 rval = qla2x00_start_sp(sp); 367 rval = qla2x00_start_sp(sp);
374 if (rval != QLA_SUCCESS) { 368 if (rval != QLA_SUCCESS) {
369 ql_log(ql_log_warn, vha, 0x700e,
370 "qla2x00_start_sp failed = %d\n", rval);
375 kfree(sp->ctx); 371 kfree(sp->ctx);
376 mempool_free(sp, ha->srb_mempool); 372 mempool_free(sp, ha->srb_mempool);
377 rval = -EIO; 373 rval = -EIO;
@@ -411,6 +407,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
411 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 407 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
412 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 408 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
413 if (!req_sg_cnt) { 409 if (!req_sg_cnt) {
410 ql_log(ql_log_warn, vha, 0x700f,
411 "dma_map_sg return %d for request\n", req_sg_cnt);
414 rval = -ENOMEM; 412 rval = -ENOMEM;
415 goto done; 413 goto done;
416 } 414 }
@@ -418,24 +416,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
418 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 416 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
419 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 417 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
420 if (!rsp_sg_cnt) { 418 if (!rsp_sg_cnt) {
419 ql_log(ql_log_warn, vha, 0x7010,
420 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
421 rval = -ENOMEM; 421 rval = -ENOMEM;
422 goto done; 422 goto done;
423 } 423 }
424 424
425 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 425 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
426 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 426 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
427 DEBUG2(qla_printk(KERN_WARNING, ha, 427 ql_log(ql_log_warn, vha, 0x7011,
428 "[request_sg_cnt: %x dma_request_sg_cnt: %x\ 428 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
429 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 429 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
430 bsg_job->request_payload.sg_cnt, req_sg_cnt, 430 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
431 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
432 rval = -EAGAIN; 431 rval = -EAGAIN;
433 goto done_unmap_sg; 432 goto done_unmap_sg;
434 } 433 }
435 434
436 if (!vha->flags.online) { 435 if (!vha->flags.online) {
437 DEBUG2(qla_printk(KERN_WARNING, ha, 436 ql_log(ql_log_warn, vha, 0x7012,
438 "host not online\n")); 437 "Host is not online.\n");
439 rval = -EIO; 438 rval = -EIO;
440 goto done_unmap_sg; 439 goto done_unmap_sg;
441 } 440 }
@@ -451,8 +450,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
451 loop_id = vha->mgmt_svr_loop_id; 450 loop_id = vha->mgmt_svr_loop_id;
452 break; 451 break;
453 default: 452 default:
454 DEBUG2(qla_printk(KERN_INFO, ha, 453 ql_dbg(ql_dbg_user, vha, 0x7013,
455 "Unknown loop id: %x\n", loop_id)); 454 "Unknown loop id: %x.\n", loop_id);
456 rval = -EINVAL; 455 rval = -EINVAL;
457 goto done_unmap_sg; 456 goto done_unmap_sg;
458 } 457 }
@@ -464,6 +463,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
464 */ 463 */
465 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 464 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
466 if (!fcport) { 465 if (!fcport) {
466 ql_log(ql_log_warn, vha, 0x7014,
467 "Failed to allocate fcport.\n");
467 rval = -ENOMEM; 468 rval = -ENOMEM;
468 goto done_unmap_sg; 469 goto done_unmap_sg;
469 } 470 }
@@ -479,6 +480,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
479 /* Alloc SRB structure */ 480 /* Alloc SRB structure */
480 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); 481 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
481 if (!sp) { 482 if (!sp) {
483 ql_log(ql_log_warn, vha, 0x7015,
484 "qla2x00_get_ctx_bsg_sp failed.\n");
482 rval = -ENOMEM; 485 rval = -ENOMEM;
483 goto done_free_fcport; 486 goto done_free_fcport;
484 } 487 }
@@ -488,15 +491,17 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
488 ct->name = "bsg_ct"; 491 ct->name = "bsg_ct";
489 ct->u.bsg_job = bsg_job; 492 ct->u.bsg_job = bsg_job;
490 493
491 DEBUG2(qla_printk(KERN_INFO, ha, 494 ql_dbg(ql_dbg_user, vha, 0x7016,
492 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " 495 "bsg rqst type: %s else type: %x - "
493 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, 496 "loop-id=%x portid=%02x%02x%02x.\n", type,
494 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), 497 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
495 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 498 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
496 fcport->d_id.b.al_pa)); 499 fcport->d_id.b.al_pa);
497 500
498 rval = qla2x00_start_sp(sp); 501 rval = qla2x00_start_sp(sp);
499 if (rval != QLA_SUCCESS) { 502 if (rval != QLA_SUCCESS) {
503 ql_log(ql_log_warn, vha, 0x7017,
504 "qla2x00_start_sp failed=%d.\n", rval);
500 kfree(sp->ctx); 505 kfree(sp->ctx);
501 mempool_free(sp, ha->srb_mempool); 506 mempool_free(sp, ha->srb_mempool);
502 rval = -EIO; 507 rval = -EIO;
@@ -535,9 +540,8 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
535 ha->notify_dcbx_comp = 1; 540 ha->notify_dcbx_comp = 1;
536 ret = qla81xx_set_port_config(vha, new_config); 541 ret = qla81xx_set_port_config(vha, new_config);
537 if (ret != QLA_SUCCESS) { 542 if (ret != QLA_SUCCESS) {
538 DEBUG2(printk(KERN_ERR 543 ql_log(ql_log_warn, vha, 0x7021,
539 "%s(%lu): Set port config failed\n", 544 "set port config failed.\n");
540 __func__, vha->host_no));
541 ha->notify_dcbx_comp = 0; 545 ha->notify_dcbx_comp = 0;
542 rval = -EINVAL; 546 rval = -EINVAL;
543 goto done_set_internal; 547 goto done_set_internal;
@@ -545,11 +549,11 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
545 549
546 /* Wait for DCBX complete event */ 550 /* Wait for DCBX complete event */
547 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { 551 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
548 DEBUG2(qla_printk(KERN_WARNING, ha, 552 ql_dbg(ql_dbg_user, vha, 0x7022,
549 "State change notificaition not received.\n")); 553 "State change notification not received.\n");
550 } else 554 } else
551 DEBUG2(qla_printk(KERN_INFO, ha, 555 ql_dbg(ql_dbg_user, vha, 0x7023,
552 "State change RECEIVED\n")); 556 "State change received.\n");
553 557
554 ha->notify_dcbx_comp = 0; 558 ha->notify_dcbx_comp = 0;
555 559
@@ -581,9 +585,8 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
581 ha->notify_dcbx_comp = wait; 585 ha->notify_dcbx_comp = wait;
582 ret = qla81xx_set_port_config(vha, new_config); 586 ret = qla81xx_set_port_config(vha, new_config);
583 if (ret != QLA_SUCCESS) { 587 if (ret != QLA_SUCCESS) {
584 DEBUG2(printk(KERN_ERR 588 ql_log(ql_log_warn, vha, 0x7025,
585 "%s(%lu): Set port config failed\n", 589 "Set port config failed.\n");
586 __func__, vha->host_no));
587 ha->notify_dcbx_comp = 0; 590 ha->notify_dcbx_comp = 0;
588 rval = -EINVAL; 591 rval = -EINVAL;
589 goto done_reset_internal; 592 goto done_reset_internal;
@@ -592,14 +595,14 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
592 /* Wait for DCBX complete event */ 595 /* Wait for DCBX complete event */
593 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, 596 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
594 (20 * HZ))) { 597 (20 * HZ))) {
595 DEBUG2(qla_printk(KERN_WARNING, ha, 598 ql_dbg(ql_dbg_user, vha, 0x7026,
596 "State change notificaition not received.\n")); 599 "State change notification not received.\n");
597 ha->notify_dcbx_comp = 0; 600 ha->notify_dcbx_comp = 0;
598 rval = -EINVAL; 601 rval = -EINVAL;
599 goto done_reset_internal; 602 goto done_reset_internal;
600 } else 603 } else
601 DEBUG2(qla_printk(KERN_INFO, ha, 604 ql_dbg(ql_dbg_user, vha, 0x7027,
602 "State change RECEIVED\n")); 605 "State change received.\n");
603 606
604 ha->notify_dcbx_comp = 0; 607 ha->notify_dcbx_comp = 0;
605 } 608 }
@@ -629,11 +632,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
629 632
630 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 633 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
631 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 634 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
632 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 635 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
636 ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
633 return -EBUSY; 637 return -EBUSY;
638 }
634 639
635 if (!vha->flags.online) { 640 if (!vha->flags.online) {
636 DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n")); 641 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
637 return -EIO; 642 return -EIO;
638 } 643 }
639 644
@@ -641,26 +646,31 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
641 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 646 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
642 DMA_TO_DEVICE); 647 DMA_TO_DEVICE);
643 648
644 if (!elreq.req_sg_cnt) 649 if (!elreq.req_sg_cnt) {
650 ql_log(ql_log_warn, vha, 0x701a,
651 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
645 return -ENOMEM; 652 return -ENOMEM;
653 }
646 654
647 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 655 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
648 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 656 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
649 DMA_FROM_DEVICE); 657 DMA_FROM_DEVICE);
650 658
651 if (!elreq.rsp_sg_cnt) { 659 if (!elreq.rsp_sg_cnt) {
660 ql_log(ql_log_warn, vha, 0x701b,
661 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
652 rval = -ENOMEM; 662 rval = -ENOMEM;
653 goto done_unmap_req_sg; 663 goto done_unmap_req_sg;
654 } 664 }
655 665
656 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || 666 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
657 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 667 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
658 DEBUG2(printk(KERN_INFO 668 ql_log(ql_log_warn, vha, 0x701c,
659 "dma mapping resulted in different sg counts " 669 "dma mapping resulted in different sg counts, "
660 "[request_sg_cnt: %x dma_request_sg_cnt: %x " 670 "request_sg_cnt: %x dma_request_sg_cnt: %x "
661 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 671 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
662 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, 672 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
663 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt)); 673 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
664 rval = -EAGAIN; 674 rval = -EAGAIN;
665 goto done_unmap_sg; 675 goto done_unmap_sg;
666 } 676 }
@@ -668,8 +678,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
668 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, 678 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
669 &req_data_dma, GFP_KERNEL); 679 &req_data_dma, GFP_KERNEL);
670 if (!req_data) { 680 if (!req_data) {
671 DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data " 681 ql_log(ql_log_warn, vha, 0x701d,
672 "failed for host=%lu\n", __func__, vha->host_no)); 682 "dma alloc failed for req_data.\n");
673 rval = -ENOMEM; 683 rval = -ENOMEM;
674 goto done_unmap_sg; 684 goto done_unmap_sg;
675 } 685 }
@@ -677,8 +687,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
677 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, 687 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
678 &rsp_data_dma, GFP_KERNEL); 688 &rsp_data_dma, GFP_KERNEL);
679 if (!rsp_data) { 689 if (!rsp_data) {
680 DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data " 690 ql_log(ql_log_warn, vha, 0x7004,
681 "failed for host=%lu\n", __func__, vha->host_no)); 691 "dma alloc failed for rsp_data.\n");
682 rval = -ENOMEM; 692 rval = -ENOMEM;
683 goto done_free_dma_req; 693 goto done_free_dma_req;
684 } 694 }
@@ -699,8 +709,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
699 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 709 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
700 elreq.options == EXTERNAL_LOOPBACK) { 710 elreq.options == EXTERNAL_LOOPBACK) {
701 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 711 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
702 DEBUG2(qla_printk(KERN_INFO, ha, 712 ql_dbg(ql_dbg_user, vha, 0x701e,
703 "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type)); 713 "BSG request type: %s.\n", type);
704 command_sent = INT_DEF_LB_ECHO_CMD; 714 command_sent = INT_DEF_LB_ECHO_CMD;
705 rval = qla2x00_echo_test(vha, &elreq, response); 715 rval = qla2x00_echo_test(vha, &elreq, response);
706 } else { 716 } else {
@@ -708,9 +718,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
708 memset(config, 0, sizeof(config)); 718 memset(config, 0, sizeof(config));
709 memset(new_config, 0, sizeof(new_config)); 719 memset(new_config, 0, sizeof(new_config));
710 if (qla81xx_get_port_config(vha, config)) { 720 if (qla81xx_get_port_config(vha, config)) {
711 DEBUG2(printk(KERN_ERR 721 ql_log(ql_log_warn, vha, 0x701f,
712 "%s(%lu): Get port config failed\n", 722 "Get port config failed.\n");
713 __func__, vha->host_no));
714 bsg_job->reply->reply_payload_rcv_len = 0; 723 bsg_job->reply->reply_payload_rcv_len = 0;
715 bsg_job->reply->result = (DID_ERROR << 16); 724 bsg_job->reply->result = (DID_ERROR << 16);
716 rval = -EPERM; 725 rval = -EPERM;
@@ -718,11 +727,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
718 } 727 }
719 728
720 if (elreq.options != EXTERNAL_LOOPBACK) { 729 if (elreq.options != EXTERNAL_LOOPBACK) {
721 DEBUG2(qla_printk(KERN_INFO, ha, 730 ql_dbg(ql_dbg_user, vha, 0x7020,
722 "Internal: current port config = %x\n", 731 "Internal: curent port config = %x\n",
723 config[0])); 732 config[0]);
724 if (qla81xx_set_internal_loopback(vha, config, 733 if (qla81xx_set_internal_loopback(vha, config,
725 new_config)) { 734 new_config)) {
735 ql_log(ql_log_warn, vha, 0x7024,
736 "Internal loopback failed.\n");
726 bsg_job->reply->reply_payload_rcv_len = 737 bsg_job->reply->reply_payload_rcv_len =
727 0; 738 0;
728 bsg_job->reply->result = 739 bsg_job->reply->result =
@@ -746,9 +757,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
746 } 757 }
747 758
748 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 759 type = "FC_BSG_HST_VENDOR_LOOPBACK";
749 DEBUG2(qla_printk(KERN_INFO, ha, 760 ql_dbg(ql_dbg_user, vha, 0x7028,
750 "scsi(%ld) bsg rqst type: %s\n", 761 "BSG request type: %s.\n", type);
751 vha->host_no, type));
752 762
753 command_sent = INT_DEF_LB_LOOPBACK_CMD; 763 command_sent = INT_DEF_LB_LOOPBACK_CMD;
754 rval = qla2x00_loopback_test(vha, &elreq, response); 764 rval = qla2x00_loopback_test(vha, &elreq, response);
@@ -763,17 +773,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
763 773
764 if (response[0] == MBS_COMMAND_ERROR && 774 if (response[0] == MBS_COMMAND_ERROR &&
765 response[1] == MBS_LB_RESET) { 775 response[1] == MBS_LB_RESET) {
766 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing " 776 ql_log(ql_log_warn, vha, 0x7029,
767 "ISP\n", __func__, vha->host_no)); 777 "MBX command error, Aborting ISP.\n");
768 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 778 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
769 qla2xxx_wake_dpc(vha); 779 qla2xxx_wake_dpc(vha);
770 qla2x00_wait_for_chip_reset(vha); 780 qla2x00_wait_for_chip_reset(vha);
771 /* Also reset the MPI */ 781 /* Also reset the MPI */
772 if (qla81xx_restart_mpi_firmware(vha) != 782 if (qla81xx_restart_mpi_firmware(vha) !=
773 QLA_SUCCESS) { 783 QLA_SUCCESS) {
774 qla_printk(KERN_INFO, ha, 784 ql_log(ql_log_warn, vha, 0x702a,
775 "MPI reset failed for host%ld.\n", 785 "MPI reset failed.\n");
776 vha->host_no);
777 } 786 }
778 787
779 bsg_job->reply->reply_payload_rcv_len = 0; 788 bsg_job->reply->reply_payload_rcv_len = 0;
@@ -783,17 +792,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
783 } 792 }
784 } else { 793 } else {
785 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 794 type = "FC_BSG_HST_VENDOR_LOOPBACK";
786 DEBUG2(qla_printk(KERN_INFO, ha, 795 ql_dbg(ql_dbg_user, vha, 0x702b,
787 "scsi(%ld) bsg rqst type: %s\n", 796 "BSG request type: %s.\n", type);
788 vha->host_no, type));
789 command_sent = INT_DEF_LB_LOOPBACK_CMD; 797 command_sent = INT_DEF_LB_LOOPBACK_CMD;
790 rval = qla2x00_loopback_test(vha, &elreq, response); 798 rval = qla2x00_loopback_test(vha, &elreq, response);
791 } 799 }
792 } 800 }
793 801
794 if (rval) { 802 if (rval) {
795 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 803 ql_log(ql_log_warn, vha, 0x702c,
796 "request %s failed\n", vha->host_no, type)); 804 "Vendor request %s failed.\n", type);
797 805
798 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + 806 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
799 sizeof(struct fc_bsg_reply); 807 sizeof(struct fc_bsg_reply);
@@ -805,8 +813,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
805 bsg_job->reply->reply_payload_rcv_len = 0; 813 bsg_job->reply->reply_payload_rcv_len = 0;
806 bsg_job->reply->result = (DID_ERROR << 16); 814 bsg_job->reply->result = (DID_ERROR << 16);
807 } else { 815 } else {
808 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 816 ql_dbg(ql_dbg_user, vha, 0x702d,
809 "request %s completed\n", vha->host_no, type)); 817 "Vendor request %s completed.\n", type);
810 818
811 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 819 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
812 sizeof(response) + sizeof(uint8_t); 820 sizeof(response) + sizeof(uint8_t);
@@ -851,12 +859,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
851 859
852 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 860 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
853 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 861 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
854 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 862 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
863 ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
855 return -EBUSY; 864 return -EBUSY;
865 }
856 866
857 if (!IS_QLA84XX(ha)) { 867 if (!IS_QLA84XX(ha)) {
858 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 868 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
859 "exiting.\n", vha->host_no));
860 return -EINVAL; 869 return -EINVAL;
861 } 870 }
862 871
@@ -865,14 +874,14 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
865 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); 874 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
866 875
867 if (rval) { 876 if (rval) {
868 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 877 ql_log(ql_log_warn, vha, 0x7030,
869 "request 84xx reset failed\n", vha->host_no)); 878 "Vendor request 84xx reset failed.\n");
870 rval = bsg_job->reply->reply_payload_rcv_len = 0; 879 rval = bsg_job->reply->reply_payload_rcv_len = 0;
871 bsg_job->reply->result = (DID_ERROR << 16); 880 bsg_job->reply->result = (DID_ERROR << 16);
872 881
873 } else { 882 } else {
874 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 883 ql_dbg(ql_dbg_user, vha, 0x7031,
875 "request 84xx reset completed\n", vha->host_no)); 884 "Vendor request 84xx reset completed.\n");
876 bsg_job->reply->result = DID_OK; 885 bsg_job->reply->result = DID_OK;
877 } 886 }
878 887
@@ -902,21 +911,24 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
902 return -EBUSY; 911 return -EBUSY;
903 912
904 if (!IS_QLA84XX(ha)) { 913 if (!IS_QLA84XX(ha)) {
905 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 914 ql_dbg(ql_dbg_user, vha, 0x7032,
906 "exiting.\n", vha->host_no)); 915 "Not 84xx, exiting.\n");
907 return -EINVAL; 916 return -EINVAL;
908 } 917 }
909 918
910 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 919 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
911 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 920 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
912 if (!sg_cnt) 921 if (!sg_cnt) {
922 ql_log(ql_log_warn, vha, 0x7033,
923 "dma_map_sg returned %d for request.\n", sg_cnt);
913 return -ENOMEM; 924 return -ENOMEM;
925 }
914 926
915 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 927 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
916 DEBUG2(printk(KERN_INFO 928 ql_log(ql_log_warn, vha, 0x7034,
917 "dma mapping resulted in different sg counts " 929 "DMA mapping resulted in different sg counts, "
918 "request_sg_cnt: %x dma_request_sg_cnt: %x ", 930 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
919 bsg_job->request_payload.sg_cnt, sg_cnt)); 931 bsg_job->request_payload.sg_cnt, sg_cnt);
920 rval = -EAGAIN; 932 rval = -EAGAIN;
921 goto done_unmap_sg; 933 goto done_unmap_sg;
922 } 934 }
@@ -925,8 +937,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
925 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, 937 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
926 &fw_dma, GFP_KERNEL); 938 &fw_dma, GFP_KERNEL);
927 if (!fw_buf) { 939 if (!fw_buf) {
928 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf " 940 ql_log(ql_log_warn, vha, 0x7035,
929 "failed for host=%lu\n", __func__, vha->host_no)); 941 "DMA alloc failed for fw_buf.\n");
930 rval = -ENOMEM; 942 rval = -ENOMEM;
931 goto done_unmap_sg; 943 goto done_unmap_sg;
932 } 944 }
@@ -936,8 +948,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
936 948
937 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 949 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
938 if (!mn) { 950 if (!mn) {
939 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " 951 ql_log(ql_log_warn, vha, 0x7036,
940 "failed for host=%lu\n", __func__, vha->host_no)); 952 "DMA alloc failed for fw buffer.\n");
941 rval = -ENOMEM; 953 rval = -ENOMEM;
942 goto done_free_fw_buf; 954 goto done_free_fw_buf;
943 } 955 }
@@ -965,15 +977,15 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
965 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 977 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
966 978
967 if (rval) { 979 if (rval) {
968 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 980 ql_log(ql_log_warn, vha, 0x7037,
969 "request 84xx updatefw failed\n", vha->host_no)); 981 "Vendor request 84xx updatefw failed.\n");
970 982
971 rval = bsg_job->reply->reply_payload_rcv_len = 0; 983 rval = bsg_job->reply->reply_payload_rcv_len = 0;
972 bsg_job->reply->result = (DID_ERROR << 16); 984 bsg_job->reply->result = (DID_ERROR << 16);
973 985
974 } else { 986 } else {
975 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 987 ql_dbg(ql_dbg_user, vha, 0x7038,
976 "request 84xx updatefw completed\n", vha->host_no)); 988 "Vendor request 84xx updatefw completed.\n");
977 989
978 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 990 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
979 bsg_job->reply->result = DID_OK; 991 bsg_job->reply->result = DID_OK;
@@ -1009,27 +1021,30 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1009 1021
1010 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1022 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1011 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1023 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1012 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 1024 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1025 ql_log(ql_log_warn, vha, 0x7039,
1026 "Abort active or needed.\n");
1013 return -EBUSY; 1027 return -EBUSY;
1028 }
1014 1029
1015 if (!IS_QLA84XX(ha)) { 1030 if (!IS_QLA84XX(ha)) {
1016 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 1031 ql_log(ql_log_warn, vha, 0x703a,
1017 "exiting.\n", vha->host_no)); 1032 "Not 84xx, exiting.\n");
1018 return -EINVAL; 1033 return -EINVAL;
1019 } 1034 }
1020 1035
1021 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request + 1036 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1022 sizeof(struct fc_bsg_request)); 1037 sizeof(struct fc_bsg_request));
1023 if (!ql84_mgmt) { 1038 if (!ql84_mgmt) {
1024 DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n", 1039 ql_log(ql_log_warn, vha, 0x703b,
1025 __func__, vha->host_no)); 1040 "MGMT header not provided, exiting.\n");
1026 return -EINVAL; 1041 return -EINVAL;
1027 } 1042 }
1028 1043
1029 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1044 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1030 if (!mn) { 1045 if (!mn) {
1031 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " 1046 ql_log(ql_log_warn, vha, 0x703c,
1032 "failed for host=%lu\n", __func__, vha->host_no)); 1047 "DMA alloc failed for fw buffer.\n");
1033 return -ENOMEM; 1048 return -ENOMEM;
1034 } 1049 }
1035 1050
@@ -1044,6 +1059,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1044 bsg_job->reply_payload.sg_list, 1059 bsg_job->reply_payload.sg_list,
1045 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1060 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1046 if (!sg_cnt) { 1061 if (!sg_cnt) {
1062 ql_log(ql_log_warn, vha, 0x703d,
1063 "dma_map_sg returned %d for reply.\n", sg_cnt);
1047 rval = -ENOMEM; 1064 rval = -ENOMEM;
1048 goto exit_mgmt; 1065 goto exit_mgmt;
1049 } 1066 }
@@ -1051,10 +1068,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1051 dma_direction = DMA_FROM_DEVICE; 1068 dma_direction = DMA_FROM_DEVICE;
1052 1069
1053 if (sg_cnt != bsg_job->reply_payload.sg_cnt) { 1070 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1054 DEBUG2(printk(KERN_INFO 1071 ql_log(ql_log_warn, vha, 0x703e,
1055 "dma mapping resulted in different sg counts " 1072 "DMA mapping resulted in different sg counts, "
1056 "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n", 1073 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1057 bsg_job->reply_payload.sg_cnt, sg_cnt)); 1074 bsg_job->reply_payload.sg_cnt, sg_cnt);
1058 rval = -EAGAIN; 1075 rval = -EAGAIN;
1059 goto done_unmap_sg; 1076 goto done_unmap_sg;
1060 } 1077 }
@@ -1064,9 +1081,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1064 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1081 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1065 &mgmt_dma, GFP_KERNEL); 1082 &mgmt_dma, GFP_KERNEL);
1066 if (!mgmt_b) { 1083 if (!mgmt_b) {
1067 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " 1084 ql_log(ql_log_warn, vha, 0x703f,
1068 "failed for host=%lu\n", 1085 "DMA alloc failed for mgmt_b.\n");
1069 __func__, vha->host_no));
1070 rval = -ENOMEM; 1086 rval = -ENOMEM;
1071 goto done_unmap_sg; 1087 goto done_unmap_sg;
1072 } 1088 }
@@ -1094,6 +1110,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1094 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1110 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1095 1111
1096 if (!sg_cnt) { 1112 if (!sg_cnt) {
1113 ql_log(ql_log_warn, vha, 0x7040,
1114 "dma_map_sg returned %d.\n", sg_cnt);
1097 rval = -ENOMEM; 1115 rval = -ENOMEM;
1098 goto exit_mgmt; 1116 goto exit_mgmt;
1099 } 1117 }
@@ -1101,10 +1119,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1101 dma_direction = DMA_TO_DEVICE; 1119 dma_direction = DMA_TO_DEVICE;
1102 1120
1103 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 1121 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1104 DEBUG2(printk(KERN_INFO 1122 ql_log(ql_log_warn, vha, 0x7041,
1105 "dma mapping resulted in different sg counts " 1123 "DMA mapping resulted in different sg counts, "
1106 "request_sg_cnt: %x dma_request_sg_cnt: %x ", 1124 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1107 bsg_job->request_payload.sg_cnt, sg_cnt)); 1125 bsg_job->request_payload.sg_cnt, sg_cnt);
1108 rval = -EAGAIN; 1126 rval = -EAGAIN;
1109 goto done_unmap_sg; 1127 goto done_unmap_sg;
1110 } 1128 }
@@ -1113,9 +1131,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1113 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1131 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1114 &mgmt_dma, GFP_KERNEL); 1132 &mgmt_dma, GFP_KERNEL);
1115 if (!mgmt_b) { 1133 if (!mgmt_b) {
1116 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " 1134 ql_log(ql_log_warn, vha, 0x7042,
1117 "failed for host=%lu\n", 1135 "DMA alloc failed for mgmt_b.\n");
1118 __func__, vha->host_no));
1119 rval = -ENOMEM; 1136 rval = -ENOMEM;
1120 goto done_unmap_sg; 1137 goto done_unmap_sg;
1121 } 1138 }
@@ -1156,15 +1173,15 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1156 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); 1173 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1157 1174
1158 if (rval) { 1175 if (rval) {
1159 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 1176 ql_log(ql_log_warn, vha, 0x7043,
1160 "request 84xx mgmt failed\n", vha->host_no)); 1177 "Vendor request 84xx mgmt failed.\n");
1161 1178
1162 rval = bsg_job->reply->reply_payload_rcv_len = 0; 1179 rval = bsg_job->reply->reply_payload_rcv_len = 0;
1163 bsg_job->reply->result = (DID_ERROR << 16); 1180 bsg_job->reply->result = (DID_ERROR << 16);
1164 1181
1165 } else { 1182 } else {
1166 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 1183 ql_dbg(ql_dbg_user, vha, 0x7044,
1167 "request 84xx mgmt completed\n", vha->host_no)); 1184 "Vendor request 84xx mgmt completed.\n");
1168 1185
1169 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1186 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1170 bsg_job->reply->result = DID_OK; 1187 bsg_job->reply->result = DID_OK;
@@ -1204,7 +1221,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1204{ 1221{
1205 struct Scsi_Host *host = bsg_job->shost; 1222 struct Scsi_Host *host = bsg_job->shost;
1206 scsi_qla_host_t *vha = shost_priv(host); 1223 scsi_qla_host_t *vha = shost_priv(host);
1207 struct qla_hw_data *ha = vha->hw;
1208 int rval = 0; 1224 int rval = 0;
1209 struct qla_port_param *port_param = NULL; 1225 struct qla_port_param *port_param = NULL;
1210 fc_port_t *fcport = NULL; 1226 fc_port_t *fcport = NULL;
@@ -1215,26 +1231,27 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1215 1231
1216 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1232 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1217 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1233 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1218 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 1234 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1235 ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
1219 return -EBUSY; 1236 return -EBUSY;
1237 }
1220 1238
1221 if (!IS_IIDMA_CAPABLE(vha->hw)) { 1239 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1222 DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not " 1240 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1223 "supported\n", __func__, vha->host_no));
1224 return -EINVAL; 1241 return -EINVAL;
1225 } 1242 }
1226 1243
1227 port_param = (struct qla_port_param *)((char *)bsg_job->request + 1244 port_param = (struct qla_port_param *)((char *)bsg_job->request +
1228 sizeof(struct fc_bsg_request)); 1245 sizeof(struct fc_bsg_request));
1229 if (!port_param) { 1246 if (!port_param) {
1230 DEBUG2(printk("%s(%ld): port_param header not provided, " 1247 ql_log(ql_log_warn, vha, 0x7047,
1231 "exiting.\n", __func__, vha->host_no)); 1248 "port_param header not provided.\n");
1232 return -EINVAL; 1249 return -EINVAL;
1233 } 1250 }
1234 1251
1235 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { 1252 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1236 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n", 1253 ql_log(ql_log_warn, vha, 0x7048,
1237 __func__, vha->host_no)); 1254 "Invalid destination type.\n");
1238 return -EINVAL; 1255 return -EINVAL;
1239 } 1256 }
1240 1257
@@ -1249,21 +1266,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1249 } 1266 }
1250 1267
1251 if (!fcport) { 1268 if (!fcport) {
1252 DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n", 1269 ql_log(ql_log_warn, vha, 0x7049,
1253 __func__, vha->host_no)); 1270 "Failed to find port.\n");
1254 return -EINVAL; 1271 return -EINVAL;
1255 } 1272 }
1256 1273
1257 if (atomic_read(&fcport->state) != FCS_ONLINE) { 1274 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1258 DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n", 1275 ql_log(ql_log_warn, vha, 0x704a,
1259 __func__, vha->host_no)); 1276 "Port is not online.\n");
1260 return -EINVAL; 1277 return -EINVAL;
1261 } 1278 }
1262 1279
1263 if (fcport->flags & FCF_LOGIN_NEEDED) { 1280 if (fcport->flags & FCF_LOGIN_NEEDED) {
1264 DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, " 1281 ql_log(ql_log_warn, vha, 0x704b,
1265 "flags = 0x%x\n", 1282 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1266 __func__, vha->host_no, fcport->flags));
1267 return -EINVAL; 1283 return -EINVAL;
1268 } 1284 }
1269 1285
@@ -1275,15 +1291,13 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1275 &port_param->speed, mb); 1291 &port_param->speed, mb);
1276 1292
1277 if (rval) { 1293 if (rval) {
1278 DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for " 1294 ql_log(ql_log_warn, vha, 0x704c,
1279 "%02x%02x%02x%02x%02x%02x%02x%02x -- " 1295 "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1280 "%04x %x %04x %04x.\n", 1296 "%04x %x %04x %04x.\n", fcport->port_name[0],
1281 vha->host_no, fcport->port_name[0], 1297 fcport->port_name[1], fcport->port_name[2],
1282 fcport->port_name[1], 1298 fcport->port_name[3], fcport->port_name[4],
1283 fcport->port_name[2], fcport->port_name[3], 1299 fcport->port_name[5], fcport->port_name[6],
1284 fcport->port_name[4], fcport->port_name[5], 1300 fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1285 fcport->port_name[6], fcport->port_name[7], rval,
1286 fcport->fp_speed, mb[0], mb[1]));
1287 rval = 0; 1301 rval = 0;
1288 bsg_job->reply->result = (DID_ERROR << 16); 1302 bsg_job->reply->result = (DID_ERROR << 16);
1289 1303
@@ -1307,11 +1321,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1307} 1321}
1308 1322
1309static int 1323static int
1310qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, 1324qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1311 uint8_t is_update) 1325 uint8_t is_update)
1312{ 1326{
1313 uint32_t start = 0; 1327 uint32_t start = 0;
1314 int valid = 0; 1328 int valid = 0;
1329 struct qla_hw_data *ha = vha->hw;
1315 1330
1316 bsg_job->reply->reply_payload_rcv_len = 0; 1331 bsg_job->reply->reply_payload_rcv_len = 0;
1317 1332
@@ -1319,14 +1334,20 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1319 return -EINVAL; 1334 return -EINVAL;
1320 1335
1321 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1336 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1322 if (start > ha->optrom_size) 1337 if (start > ha->optrom_size) {
1338 ql_log(ql_log_warn, vha, 0x7055,
1339 "start %d > optrom_size %d.\n", start, ha->optrom_size);
1323 return -EINVAL; 1340 return -EINVAL;
1341 }
1324 1342
1325 if (ha->optrom_state != QLA_SWAITING) 1343 if (ha->optrom_state != QLA_SWAITING) {
1344 ql_log(ql_log_info, vha, 0x7056,
1345 "optrom_state %d.\n", ha->optrom_state);
1326 return -EBUSY; 1346 return -EBUSY;
1347 }
1327 1348
1328 ha->optrom_region_start = start; 1349 ha->optrom_region_start = start;
1329 1350 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1330 if (is_update) { 1351 if (is_update) {
1331 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 1352 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1332 valid = 1; 1353 valid = 1;
@@ -1337,9 +1358,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1337 IS_QLA8XXX_TYPE(ha)) 1358 IS_QLA8XXX_TYPE(ha))
1338 valid = 1; 1359 valid = 1;
1339 if (!valid) { 1360 if (!valid) {
1340 qla_printk(KERN_WARNING, ha, 1361 ql_log(ql_log_warn, vha, 0x7058,
1341 "Invalid start region 0x%x/0x%x.\n", 1362 "Invalid start region 0x%x/0x%x.\n", start,
1342 start, bsg_job->request_payload.payload_len); 1363 bsg_job->request_payload.payload_len);
1343 return -EINVAL; 1364 return -EINVAL;
1344 } 1365 }
1345 1366
@@ -1358,9 +1379,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1358 1379
1359 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 1380 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1360 if (!ha->optrom_buffer) { 1381 if (!ha->optrom_buffer) {
1361 qla_printk(KERN_WARNING, ha, 1382 ql_log(ql_log_warn, vha, 0x7059,
1362 "Read: Unable to allocate memory for optrom retrieval " 1383 "Read: Unable to allocate memory for optrom retrieval "
1363 "(%x).\n", ha->optrom_region_size); 1384 "(%x)\n", ha->optrom_region_size);
1364 1385
1365 ha->optrom_state = QLA_SWAITING; 1386 ha->optrom_state = QLA_SWAITING;
1366 return -ENOMEM; 1387 return -ENOMEM;
@@ -1378,7 +1399,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1378 struct qla_hw_data *ha = vha->hw; 1399 struct qla_hw_data *ha = vha->hw;
1379 int rval = 0; 1400 int rval = 0;
1380 1401
1381 rval = qla2x00_optrom_setup(bsg_job, ha, 0); 1402 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1382 if (rval) 1403 if (rval)
1383 return rval; 1404 return rval;
1384 1405
@@ -1406,7 +1427,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1406 struct qla_hw_data *ha = vha->hw; 1427 struct qla_hw_data *ha = vha->hw;
1407 int rval = 0; 1428 int rval = 0;
1408 1429
1409 rval = qla2x00_optrom_setup(bsg_job, ha, 1); 1430 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1410 if (rval) 1431 if (rval)
1411 return rval; 1432 return rval;
1412 1433
@@ -1464,6 +1485,23 @@ int
1464qla24xx_bsg_request(struct fc_bsg_job *bsg_job) 1485qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1465{ 1486{
1466 int ret = -EINVAL; 1487 int ret = -EINVAL;
1488 struct fc_rport *rport;
1489 fc_port_t *fcport = NULL;
1490 struct Scsi_Host *host;
1491 scsi_qla_host_t *vha;
1492
1493 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1494 rport = bsg_job->rport;
1495 fcport = *(fc_port_t **) rport->dd_data;
1496 host = rport_to_shost(rport);
1497 vha = shost_priv(host);
1498 } else {
1499 host = bsg_job->shost;
1500 vha = shost_priv(host);
1501 }
1502
1503 ql_dbg(ql_dbg_user, vha, 0x7000,
1504 "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
1467 1505
1468 switch (bsg_job->request->msgcode) { 1506 switch (bsg_job->request->msgcode) {
1469 case FC_BSG_RPT_ELS: 1507 case FC_BSG_RPT_ELS:
@@ -1480,7 +1518,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1480 case FC_BSG_HST_DEL_RPORT: 1518 case FC_BSG_HST_DEL_RPORT:
1481 case FC_BSG_RPT_CT: 1519 case FC_BSG_RPT_CT:
1482 default: 1520 default:
1483 DEBUG2(printk("qla2xxx: unsupported BSG request\n")); 1521 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1484 break; 1522 break;
1485 } 1523 }
1486 return ret; 1524 return ret;
@@ -1514,17 +1552,15 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1514 && (sp_bsg->u.bsg_job == bsg_job)) { 1552 && (sp_bsg->u.bsg_job == bsg_job)) {
1515 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1553 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1516 if (ha->isp_ops->abort_command(sp)) { 1554 if (ha->isp_ops->abort_command(sp)) {
1517 DEBUG2(qla_printk(KERN_INFO, ha, 1555 ql_log(ql_log_warn, vha, 0x7089,
1518 "scsi(%ld): mbx " 1556 "mbx abort_command "
1519 "abort_command failed\n", 1557 "failed.\n");
1520 vha->host_no));
1521 bsg_job->req->errors = 1558 bsg_job->req->errors =
1522 bsg_job->reply->result = -EIO; 1559 bsg_job->reply->result = -EIO;
1523 } else { 1560 } else {
1524 DEBUG2(qla_printk(KERN_INFO, ha, 1561 ql_dbg(ql_dbg_user, vha, 0x708a,
1525 "scsi(%ld): mbx " 1562 "mbx abort_command "
1526 "abort_command success\n", 1563 "success.\n");
1527 vha->host_no));
1528 bsg_job->req->errors = 1564 bsg_job->req->errors =
1529 bsg_job->reply->result = 0; 1565 bsg_job->reply->result = 0;
1530 } 1566 }
@@ -1535,8 +1571,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1535 } 1571 }
1536 } 1572 }
1537 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1573 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1538 DEBUG2(qla_printk(KERN_INFO, ha, 1574 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1539 "scsi(%ld) SRB not found to abort\n", vha->host_no));
1540 bsg_job->req->errors = bsg_job->reply->result = -ENXIO; 1575 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1541 return 0; 1576 return 0;
1542 1577
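For orientation (not part of the patch itself): the qla_bsg.c hunks above all follow one conversion pattern. Debug-only traces that used to be wrapped in DEBUG2(qla_printk(...)) become ql_dbg() calls tagged with a category (here ql_dbg_user) and a unique hex message ID, while messages that should always reach the log become ql_log() calls tagged with a severity (ql_log_warn, ql_log_info). Both take the vha pointer instead of the qla_hw_data handle, so the "scsi(%ld)"/host_no boilerplate disappears from the format strings. A minimal sketch of how a converted call site reads, assuming only the signatures visible in the hunks (the function name and the 0x70f0/0x70f1 IDs are illustrative, not allocated in the real driver):

/* Sketch only -- hypothetical function and message IDs. */
static void example_bsg_trace(scsi_qla_host_t *vha, int sg_cnt)
{
        if (!sg_cnt) {
                /* Unconditional warning: always logged. */
                ql_log(ql_log_warn, vha, 0x70f0,
                    "dma_map_sg returned %d.\n", sg_cnt);
                return;
        }

        /* Debug trace: emitted only when the user category is enabled. */
        ql_dbg(ql_dbg_user, vha, 0x70f1,
            "Mapped %d segments.\n", sg_cnt);
}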
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index dba9eedc3ded..5c1e69f091e5 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,18 +11,18 @@
11 * ----------------------------------------------------- 11 * -----------------------------------------------------
12 * | Level | Last Value Used | 12 * | Level | Last Value Used |
13 * ----------------------------------------------------- 13 * -----------------------------------------------------
14 * | Module Init and Probe | 0x0109 | 14 * | Module Init and Probe | 0x0116 |
15 * | Mailbox commands | 0x1120 | 15 * | Mailbox commands | 0x111e |
16 * | Device Discovery | 0x207d | 16 * | Device Discovery | 0x2083 |
17 * | Queue Command and IO tracing | 0x304f | 17 * | Queue Command and IO tracing | 0x302e |
18 * | DPC Thread | 0x401c | 18 * | DPC Thread | 0x401c |
19 * | Async Events | 0x5058 | 19 * | Async Events | 0x5059 |
20 * | Timer Routines | 0x600d | 20 * | Timer Routines | 0x600d |
21 * | User Space Interactions | 0x70a1 | 21 * | User Space Interactions | 0x709c |
22 * | Task Management | 0x8032 | 22 * | Task Management | 0x8043 |
23 * | AER/EEH | 0x9010 | 23 * | AER/EEH | 0x900f |
24 * | Virtual Port | 0xa007 | 24 * | Virtual Port | 0xa007 |
25 * | ISP82XX Specific | 0xb028 | 25 * | ISP82XX Specific | 0xb027 |
26 * | MultiQ | 0xc00b | 26 * | MultiQ | 0xc00b |
27 * | Misc | 0xd00b | 27 * | Misc | 0xd00b |
28 * ----------------------------------------------------- 28 * -----------------------------------------------------
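The table above is the bookkeeping device for the new message IDs: each subsystem owns a prefix (0x0xxx init/probe, 0x1xxx mailbox, 0x2xxx discovery, 0x7xxx user-space interactions, 0xdxxx misc, and so on) and the "Last Value Used" column records the highest ID handed out in that range, so a new trace point takes the next free value and the table is bumped in the same change. A hypothetical example, assuming 0x709c really is the last user-space ID in use:

/*
 * Hypothetical new trace point in the user-space (0x7xxx) range;
 * 0x709d is simply "last value used + 1" per the table above.
 */
static void example_new_trace(scsi_qla_host_t *vha)
{
        ql_dbg(ql_dbg_user, vha, 0x709d,
            "New trace point claims the next free message ID.\n");
}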
@@ -409,11 +409,11 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
409 struct qla_hw_data *ha = vha->hw; 409 struct qla_hw_data *ha = vha->hw;
410 410
411 if (rval != QLA_SUCCESS) { 411 if (rval != QLA_SUCCESS) {
412 qla_printk(KERN_WARNING, ha, 412 ql_log(ql_log_warn, vha, 0xd000,
413 "Failed to dump firmware (%x)!!!\n", rval); 413 "Failed to dump firmware (%x).\n", rval);
414 ha->fw_dumped = 0; 414 ha->fw_dumped = 0;
415 } else { 415 } else {
416 qla_printk(KERN_INFO, ha, 416 ql_log(ql_log_info, vha, 0xd001,
417 "Firmware dump saved to temp buffer (%ld/%p).\n", 417 "Firmware dump saved to temp buffer (%ld/%p).\n",
418 vha->host_no, ha->fw_dump); 418 vha->host_no, ha->fw_dump);
419 ha->fw_dumped = 1; 419 ha->fw_dumped = 1;
@@ -445,15 +445,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
445 spin_lock_irqsave(&ha->hardware_lock, flags); 445 spin_lock_irqsave(&ha->hardware_lock, flags);
446 446
447 if (!ha->fw_dump) { 447 if (!ha->fw_dump) {
448 qla_printk(KERN_WARNING, ha, 448 ql_log(ql_log_warn, vha, 0xd002,
449 "No buffer available for dump!!!\n"); 449 "No buffer available for dump.\n");
450 goto qla2300_fw_dump_failed; 450 goto qla2300_fw_dump_failed;
451 } 451 }
452 452
453 if (ha->fw_dumped) { 453 if (ha->fw_dumped) {
454 qla_printk(KERN_WARNING, ha, 454 ql_log(ql_log_warn, vha, 0xd003,
455 "Firmware has been previously dumped (%p) -- ignoring " 455 "Firmware has been previously dumped (%p) "
456 "request...\n", ha->fw_dump); 456 "-- ignoring request.\n",
457 ha->fw_dump);
457 goto qla2300_fw_dump_failed; 458 goto qla2300_fw_dump_failed;
458 } 459 }
459 fw = &ha->fw_dump->isp.isp23; 460 fw = &ha->fw_dump->isp.isp23;
@@ -608,15 +609,16 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
608 spin_lock_irqsave(&ha->hardware_lock, flags); 609 spin_lock_irqsave(&ha->hardware_lock, flags);
609 610
610 if (!ha->fw_dump) { 611 if (!ha->fw_dump) {
611 qla_printk(KERN_WARNING, ha, 612 ql_log(ql_log_warn, vha, 0xd004,
612 "No buffer available for dump!!!\n"); 613 "No buffer available for dump.\n");
613 goto qla2100_fw_dump_failed; 614 goto qla2100_fw_dump_failed;
614 } 615 }
615 616
616 if (ha->fw_dumped) { 617 if (ha->fw_dumped) {
617 qla_printk(KERN_WARNING, ha, 618 ql_log(ql_log_warn, vha, 0xd005,
618 "Firmware has been previously dumped (%p) -- ignoring " 619 "Firmware has been previously dumped (%p) "
619 "request...\n", ha->fw_dump); 620 "-- ignoring request.\n",
621 ha->fw_dump);
620 goto qla2100_fw_dump_failed; 622 goto qla2100_fw_dump_failed;
621 } 623 }
622 fw = &ha->fw_dump->isp.isp21; 624 fw = &ha->fw_dump->isp.isp21;
@@ -805,15 +807,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
805 spin_lock_irqsave(&ha->hardware_lock, flags); 807 spin_lock_irqsave(&ha->hardware_lock, flags);
806 808
807 if (!ha->fw_dump) { 809 if (!ha->fw_dump) {
808 qla_printk(KERN_WARNING, ha, 810 ql_log(ql_log_warn, vha, 0xd006,
809 "No buffer available for dump!!!\n"); 811 "No buffer available for dump.\n");
810 goto qla24xx_fw_dump_failed; 812 goto qla24xx_fw_dump_failed;
811 } 813 }
812 814
813 if (ha->fw_dumped) { 815 if (ha->fw_dumped) {
814 qla_printk(KERN_WARNING, ha, 816 ql_log(ql_log_warn, vha, 0xd007,
815 "Firmware has been previously dumped (%p) -- ignoring " 817 "Firmware has been previously dumped (%p) "
816 "request...\n", ha->fw_dump); 818 "-- ignoring request.\n",
819 ha->fw_dump);
817 goto qla24xx_fw_dump_failed; 820 goto qla24xx_fw_dump_failed;
818 } 821 }
819 fw = &ha->fw_dump->isp.isp24; 822 fw = &ha->fw_dump->isp.isp24;
@@ -1043,15 +1046,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1043 spin_lock_irqsave(&ha->hardware_lock, flags); 1046 spin_lock_irqsave(&ha->hardware_lock, flags);
1044 1047
1045 if (!ha->fw_dump) { 1048 if (!ha->fw_dump) {
1046 qla_printk(KERN_WARNING, ha, 1049 ql_log(ql_log_warn, vha, 0xd008,
1047 "No buffer available for dump!!!\n"); 1050 "No buffer available for dump.\n");
1048 goto qla25xx_fw_dump_failed; 1051 goto qla25xx_fw_dump_failed;
1049 } 1052 }
1050 1053
1051 if (ha->fw_dumped) { 1054 if (ha->fw_dumped) {
1052 qla_printk(KERN_WARNING, ha, 1055 ql_log(ql_log_warn, vha, 0xd009,
1053 "Firmware has been previously dumped (%p) -- ignoring " 1056 "Firmware has been previously dumped (%p) "
1054 "request...\n", ha->fw_dump); 1057 "-- ignoring request.\n",
1058 ha->fw_dump);
1055 goto qla25xx_fw_dump_failed; 1059 goto qla25xx_fw_dump_failed;
1056 } 1060 }
1057 fw = &ha->fw_dump->isp.isp25; 1061 fw = &ha->fw_dump->isp.isp25;
@@ -1354,15 +1358,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1354 spin_lock_irqsave(&ha->hardware_lock, flags); 1358 spin_lock_irqsave(&ha->hardware_lock, flags);
1355 1359
1356 if (!ha->fw_dump) { 1360 if (!ha->fw_dump) {
1357 qla_printk(KERN_WARNING, ha, 1361 ql_log(ql_log_warn, vha, 0xd00a,
1358 "No buffer available for dump!!!\n"); 1362 "No buffer available for dump.\n");
1359 goto qla81xx_fw_dump_failed; 1363 goto qla81xx_fw_dump_failed;
1360 } 1364 }
1361 1365
1362 if (ha->fw_dumped) { 1366 if (ha->fw_dumped) {
1363 qla_printk(KERN_WARNING, ha, 1367 ql_log(ql_log_warn, vha, 0xd00b,
1364 "Firmware has been previously dumped (%p) -- ignoring " 1368 "Firmware has been previously dumped (%p) "
1365 "request...\n", ha->fw_dump); 1369 "-- ignoring request.\n",
1370 ha->fw_dump);
1366 goto qla81xx_fw_dump_failed; 1371 goto qla81xx_fw_dump_failed;
1367 } 1372 }
1368 fw = &ha->fw_dump->isp.isp81; 1373 fw = &ha->fw_dump->isp.isp81;
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a5a4e1275bf2..0b4c2b794c6f 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -64,7 +64,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
64 /* Pause tracing to flush FCE buffers. */ 64 /* Pause tracing to flush FCE buffers. */
65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd); 65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
66 if (rval) 66 if (rval)
67 qla_printk(KERN_WARNING, ha, 67 ql_dbg(ql_dbg_user, vha, 0x705c,
68 "DebugFS: Unable to disable FCE (%d).\n", rval); 68 "DebugFS: Unable to disable FCE (%d).\n", rval);
69 69
70 ha->flags.fce_enabled = 0; 70 ha->flags.fce_enabled = 0;
@@ -92,7 +92,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, 92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
93 ha->fce_mb, &ha->fce_bufs); 93 ha->fce_mb, &ha->fce_bufs);
94 if (rval) { 94 if (rval) {
95 qla_printk(KERN_WARNING, ha, 95 ql_dbg(ql_dbg_user, vha, 0x700d,
96 "DebugFS: Unable to reinitialize FCE (%d).\n", rval); 96 "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
97 ha->flags.fce_enabled = 0; 97 ha->flags.fce_enabled = 0;
98 } 98 }
@@ -125,8 +125,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
125 atomic_set(&qla2x00_dfs_root_count, 0); 125 atomic_set(&qla2x00_dfs_root_count, 0);
126 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL); 126 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
127 if (!qla2x00_dfs_root) { 127 if (!qla2x00_dfs_root) {
128 qla_printk(KERN_NOTICE, ha, 128 ql_log(ql_log_warn, vha, 0x00f7,
129 "DebugFS: Unable to create root directory.\n"); 129 "Unable to create debugfs root directory.\n");
130 goto out; 130 goto out;
131 } 131 }
132 132
@@ -137,8 +137,8 @@ create_dir:
137 mutex_init(&ha->fce_mutex); 137 mutex_init(&ha->fce_mutex);
138 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root); 138 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
139 if (!ha->dfs_dir) { 139 if (!ha->dfs_dir) {
140 qla_printk(KERN_NOTICE, ha, 140 ql_log(ql_log_warn, vha, 0x00f8,
141 "DebugFS: Unable to create ha directory.\n"); 141 "Unable to create debugfs ha directory.\n");
142 goto out; 142 goto out;
143 } 143 }
144 144
@@ -148,8 +148,8 @@ create_nodes:
148 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, 148 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
149 &dfs_fce_ops); 149 &dfs_fce_ops);
150 if (!ha->dfs_fce) { 150 if (!ha->dfs_fce) {
151 qla_printk(KERN_NOTICE, ha, 151 ql_log(ql_log_warn, vha, 0x00f9,
152 "DebugFS: Unable to fce node.\n"); 152 "Unable to create debugfs fce node.\n");
153 goto out; 153 goto out;
154 } 154 }
155out: 155out:
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 38aef5d0515f..29b1a3e28231 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -185,7 +185,7 @@ extern int qla24xx_start_scsi(srb_t *sp);
185int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, 185int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
186 uint16_t, uint16_t, uint8_t); 186 uint16_t, uint16_t, uint8_t);
187extern int qla2x00_start_sp(srb_t *); 187extern int qla2x00_start_sp(srb_t *);
188extern uint16_t qla24xx_calc_iocbs(uint16_t); 188extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
190extern int qla24xx_dif_start_scsi(srb_t *); 190extern int qla24xx_dif_start_scsi(srb_t *);
191 191
@@ -481,7 +481,8 @@ extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16
481extern int qla2x00_echo_test(scsi_qla_host_t *, 481extern int qla2x00_echo_test(scsi_qla_host_t *,
482 struct msg_echo_lb *, uint16_t *); 482 struct msg_echo_lb *, uint16_t *);
483extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *); 483extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
484extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t); 484extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
485 struct qla_fcp_prio_cfg *, uint8_t);
485 486
486/* 487/*
487 * Global Function Prototypes in qla_dfs.c source file. 488 * Global Function Prototypes in qla_dfs.c source file.
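The qla_gbl.h prototype changes above (qla24xx_calc_iocbs() and qla24xx_fcp_prio_cfg_valid() each gaining a scsi_qla_host_t * first argument) exist so the callees can reach vha for ql_dbg()/ql_log(); callers simply thread through the host they already hold. A caller-side sketch, with the wrapper function invented for illustration:

/* Illustrative wrapper; only the qla24xx_calc_iocbs() call reflects the patch. */
static uint16_t example_calc_iocbs(scsi_qla_host_t *vha, uint16_t tot_dsds)
{
        /* New first argument gives the callee a host to log against. */
        return qla24xx_calc_iocbs(vha, tot_dsds);
}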
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 8cd9066ad906..37937aa3c3b8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,11 +121,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
121 121
122 rval = QLA_FUNCTION_FAILED; 122 rval = QLA_FUNCTION_FAILED;
123 if (ms_pkt->entry_status != 0) { 123 if (ms_pkt->entry_status != 0) {
124 DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status " 124 ql_dbg(ql_dbg_disc, vha, 0x2031,
125 "(%x) on port_id: %02x%02x%02x.\n", 125 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
126 vha->host_no, routine, ms_pkt->entry_status, 126 routine, ms_pkt->entry_status, vha->d_id.b.domain,
127 vha->d_id.b.domain, vha->d_id.b.area, 127 vha->d_id.b.area, vha->d_id.b.al_pa);
128 vha->d_id.b.al_pa));
129 } else { 128 } else {
130 if (IS_FWI2_CAPABLE(ha)) 129 if (IS_FWI2_CAPABLE(ha))
131 comp_status = le16_to_cpu( 130 comp_status = le16_to_cpu(
@@ -138,24 +137,24 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
138 case CS_DATA_OVERRUN: /* Overrun? */ 137 case CS_DATA_OVERRUN: /* Overrun? */
139 if (ct_rsp->header.response != 138 if (ct_rsp->header.response !=
140 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { 139 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
141 DEBUG2_3(printk("scsi(%ld): %s failed, " 140 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
142 "rejected request on port_id: %02x%02x%02x\n", 141 "%s failed rejected request on port_id: "
143 vha->host_no, routine, 142 "%02x%02x%02x.\n", routine,
144 vha->d_id.b.domain, vha->d_id.b.area, 143 vha->d_id.b.domain, vha->d_id.b.area,
145 vha->d_id.b.al_pa)); 144 vha->d_id.b.al_pa);
146 DEBUG2_3(qla2x00_dump_buffer( 145 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
147 (uint8_t *)&ct_rsp->header, 146 0x2078, (uint8_t *)&ct_rsp->header,
148 sizeof(struct ct_rsp_hdr))); 147 sizeof(struct ct_rsp_hdr));
149 rval = QLA_INVALID_COMMAND; 148 rval = QLA_INVALID_COMMAND;
150 } else 149 } else
151 rval = QLA_SUCCESS; 150 rval = QLA_SUCCESS;
152 break; 151 break;
153 default: 152 default:
154 DEBUG2_3(printk("scsi(%ld): %s failed, completion " 153 ql_dbg(ql_dbg_disc, vha, 0x2033,
155 "status (%x) on port_id: %02x%02x%02x.\n", 154 "%s failed, completion status (%x) on port_id: "
156 vha->host_no, routine, comp_status, 155 "%02x%02x%02x.\n", routine, comp_status,
157 vha->d_id.b.domain, vha->d_id.b.area, 156 vha->d_id.b.domain, vha->d_id.b.area,
158 vha->d_id.b.al_pa)); 157 vha->d_id.b.al_pa);
159 break; 158 break;
160 } 159 }
161 } 160 }
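The qla2x00_chk_ms_status() hunk above also shows how raw hex dumps are handled now: the DEBUG2_3(qla2x00_dump_buffer(...)) pairs become ql_dump_buffer() calls whose first argument combines the ql_dbg_disc and ql_dbg_buffer category bits. A sketch of the same pairing, assuming only the calls visible above (the 0x20f0/0x20f1 IDs are placeholders):

/* Sketch only -- placeholder message IDs. */
static void example_dump_ct_reject(scsi_qla_host_t *vha,
    struct ct_rsp_hdr *hdr)
{
        ql_dbg(ql_dbg_disc, vha, 0x20f0,
            "CT request rejected on port_id: %02x%02x%02x.\n",
            vha->d_id.b.domain, vha->d_id.b.area, vha->d_id.b.al_pa);

        /* Hex dump tagged with the combined discovery + buffer categories. */
        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f1,
            (uint8_t *)hdr, sizeof(*hdr));
}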
@@ -202,8 +201,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
202 sizeof(ms_iocb_entry_t)); 201 sizeof(ms_iocb_entry_t));
203 if (rval != QLA_SUCCESS) { 202 if (rval != QLA_SUCCESS) {
204 /*EMPTY*/ 203 /*EMPTY*/
205 DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n", 204 ql_dbg(ql_dbg_disc, vha, 0x2062,
206 vha->host_no, rval)); 205 "GA_NXT issue IOCB failed (%d).\n", rval);
207 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != 206 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
208 QLA_SUCCESS) { 207 QLA_SUCCESS) {
209 rval = QLA_FUNCTION_FAILED; 208 rval = QLA_FUNCTION_FAILED;
@@ -222,11 +221,10 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
222 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) 221 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
223 fcport->d_id.b.domain = 0xf0; 222 fcport->d_id.b.domain = 0xf0;
224 223
225 DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " 224 ql_dbg(ql_dbg_disc, vha, 0x2063,
226 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 225 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
227 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 226 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
228 "portid=%02x%02x%02x.\n", 227 "port_id=%02x%02x%02x.\n",
229 vha->host_no,
230 fcport->node_name[0], fcport->node_name[1], 228 fcport->node_name[0], fcport->node_name[1],
231 fcport->node_name[2], fcport->node_name[3], 229 fcport->node_name[2], fcport->node_name[3],
232 fcport->node_name[4], fcport->node_name[5], 230 fcport->node_name[4], fcport->node_name[5],
@@ -236,7 +234,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
236 fcport->port_name[4], fcport->port_name[5], 234 fcport->port_name[4], fcport->port_name[5],
237 fcport->port_name[6], fcport->port_name[7], 235 fcport->port_name[6], fcport->port_name[7],
238 fcport->d_id.b.domain, fcport->d_id.b.area, 236 fcport->d_id.b.domain, fcport->d_id.b.area,
239 fcport->d_id.b.al_pa)); 237 fcport->d_id.b.al_pa);
240 } 238 }
241 239
242 return (rval); 240 return (rval);
@@ -287,8 +285,8 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
287 sizeof(ms_iocb_entry_t)); 285 sizeof(ms_iocb_entry_t));
288 if (rval != QLA_SUCCESS) { 286 if (rval != QLA_SUCCESS) {
289 /*EMPTY*/ 287 /*EMPTY*/
290 DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n", 288 ql_dbg(ql_dbg_disc, vha, 0x2055,
291 vha->host_no, rval)); 289 "GID_PT issue IOCB failed (%d).\n", rval);
292 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != 290 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
293 QLA_SUCCESS) { 291 QLA_SUCCESS) {
294 rval = QLA_FUNCTION_FAILED; 292 rval = QLA_FUNCTION_FAILED;
@@ -364,8 +362,8 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
364 sizeof(ms_iocb_entry_t)); 362 sizeof(ms_iocb_entry_t));
365 if (rval != QLA_SUCCESS) { 363 if (rval != QLA_SUCCESS) {
366 /*EMPTY*/ 364 /*EMPTY*/
367 DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed " 365 ql_dbg(ql_dbg_disc, vha, 0x2056,
368 "(%d).\n", vha->host_no, rval)); 366 "GPN_ID issue IOCB failed (%d).\n", rval);
369 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 367 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
370 "GPN_ID") != QLA_SUCCESS) { 368 "GPN_ID") != QLA_SUCCESS) {
371 rval = QLA_FUNCTION_FAILED; 369 rval = QLA_FUNCTION_FAILED;
@@ -424,8 +422,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
424 sizeof(ms_iocb_entry_t)); 422 sizeof(ms_iocb_entry_t));
425 if (rval != QLA_SUCCESS) { 423 if (rval != QLA_SUCCESS) {
426 /*EMPTY*/ 424 /*EMPTY*/
427 DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed " 425 ql_dbg(ql_dbg_disc, vha, 0x2057,
428 "(%d).\n", vha->host_no, rval)); 426 "GNN_ID issue IOCB failed (%d).\n", rval);
429 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 427 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
430 "GNN_ID") != QLA_SUCCESS) { 428 "GNN_ID") != QLA_SUCCESS) {
431 rval = QLA_FUNCTION_FAILED; 429 rval = QLA_FUNCTION_FAILED;
@@ -434,11 +432,10 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
434 memcpy(list[i].node_name, 432 memcpy(list[i].node_name,
435 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); 433 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
436 434
437 DEBUG2_3(printk("scsi(%ld): GID_PT entry - " 435 ql_dbg(ql_dbg_disc, vha, 0x2058,
438 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 436 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
439 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 437 "pn %02x%02x%02x%02x%02x%02x%02X%02x "
440 "portid=%02x%02x%02x.\n", 438 "portid=%02x%02x%02x.\n",
441 vha->host_no,
442 list[i].node_name[0], list[i].node_name[1], 439 list[i].node_name[0], list[i].node_name[1],
443 list[i].node_name[2], list[i].node_name[3], 440 list[i].node_name[2], list[i].node_name[3],
444 list[i].node_name[4], list[i].node_name[5], 441 list[i].node_name[4], list[i].node_name[5],
@@ -448,7 +445,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
448 list[i].port_name[4], list[i].port_name[5], 445 list[i].port_name[4], list[i].port_name[5],
449 list[i].port_name[6], list[i].port_name[7], 446 list[i].port_name[6], list[i].port_name[7],
450 list[i].d_id.b.domain, list[i].d_id.b.area, 447 list[i].d_id.b.domain, list[i].d_id.b.area,
451 list[i].d_id.b.al_pa)); 448 list[i].d_id.b.al_pa);
452 } 449 }
453 450
454 /* Last device exit. */ 451 /* Last device exit. */
@@ -499,14 +496,14 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
499 sizeof(ms_iocb_entry_t)); 496 sizeof(ms_iocb_entry_t));
500 if (rval != QLA_SUCCESS) { 497 if (rval != QLA_SUCCESS) {
501 /*EMPTY*/ 498 /*EMPTY*/
502 DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n", 499 ql_dbg(ql_dbg_disc, vha, 0x2043,
503 vha->host_no, rval)); 500 "RFT_ID issue IOCB failed (%d).\n", rval);
504 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") != 501 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
505 QLA_SUCCESS) { 502 QLA_SUCCESS) {
506 rval = QLA_FUNCTION_FAILED; 503 rval = QLA_FUNCTION_FAILED;
507 } else { 504 } else {
508 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 505 ql_dbg(ql_dbg_disc, vha, 0x2044,
509 vha->host_no)); 506 "RFT_ID exiting normally.\n");
510 } 507 }
511 508
512 return (rval); 509 return (rval);
@@ -528,8 +525,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
528 struct ct_sns_rsp *ct_rsp; 525 struct ct_sns_rsp *ct_rsp;
529 526
530 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 527 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
531 DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on " 528 ql_dbg(ql_dbg_disc, vha, 0x2046,
532 "ISP2100/ISP2200.\n", vha->host_no)); 529 "RFF_ID call not supported on ISP2100/ISP2200.\n");
533 return (QLA_SUCCESS); 530 return (QLA_SUCCESS);
534 } 531 }
535 532
@@ -556,14 +553,14 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
556 sizeof(ms_iocb_entry_t)); 553 sizeof(ms_iocb_entry_t));
557 if (rval != QLA_SUCCESS) { 554 if (rval != QLA_SUCCESS) {
558 /*EMPTY*/ 555 /*EMPTY*/
559 DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n", 556 ql_dbg(ql_dbg_disc, vha, 0x2047,
560 vha->host_no, rval)); 557 "RFF_ID issue IOCB failed (%d).\n", rval);
561 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") != 558 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
562 QLA_SUCCESS) { 559 QLA_SUCCESS) {
563 rval = QLA_FUNCTION_FAILED; 560 rval = QLA_FUNCTION_FAILED;
564 } else { 561 } else {
565 DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n", 562 ql_dbg(ql_dbg_disc, vha, 0x2048,
566 vha->host_no)); 563 "RFF_ID exiting normally.\n");
567 } 564 }
568 565
569 return (rval); 566 return (rval);
@@ -609,14 +606,14 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
609 sizeof(ms_iocb_entry_t)); 606 sizeof(ms_iocb_entry_t));
610 if (rval != QLA_SUCCESS) { 607 if (rval != QLA_SUCCESS) {
611 /*EMPTY*/ 608 /*EMPTY*/
612 DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n", 609 ql_dbg(ql_dbg_disc, vha, 0x204d,
613 vha->host_no, rval)); 610 "RNN_ID issue IOCB failed (%d).\n", rval);
614 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") != 611 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
615 QLA_SUCCESS) { 612 QLA_SUCCESS) {
616 rval = QLA_FUNCTION_FAILED; 613 rval = QLA_FUNCTION_FAILED;
617 } else { 614 } else {
618 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 615 ql_dbg(ql_dbg_disc, vha, 0x204e,
619 vha->host_no)); 616 "RNN_ID exiting normally.\n");
620 } 617 }
621 618
622 return (rval); 619 return (rval);
@@ -647,8 +644,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
647 struct ct_sns_rsp *ct_rsp; 644 struct ct_sns_rsp *ct_rsp;
648 645
649 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 646 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
650 DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on " 647 ql_dbg(ql_dbg_disc, vha, 0x2050,
651 "ISP2100/ISP2200.\n", vha->host_no)); 648 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
652 return (QLA_SUCCESS); 649 return (QLA_SUCCESS);
653 } 650 }
654 651
@@ -682,14 +679,14 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
682 sizeof(ms_iocb_entry_t)); 679 sizeof(ms_iocb_entry_t));
683 if (rval != QLA_SUCCESS) { 680 if (rval != QLA_SUCCESS) {
684 /*EMPTY*/ 681 /*EMPTY*/
685 DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n", 682 ql_dbg(ql_dbg_disc, vha, 0x2051,
686 vha->host_no, rval)); 683 "RSNN_NN issue IOCB failed (%d).\n", rval);
687 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") != 684 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
688 QLA_SUCCESS) { 685 QLA_SUCCESS) {
689 rval = QLA_FUNCTION_FAILED; 686 rval = QLA_FUNCTION_FAILED;
690 } else { 687 } else {
691 DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n", 688 ql_dbg(ql_dbg_disc, vha, 0x2052,
692 vha->host_no)); 689 "RSNN_NN exiting normally.\n");
693 } 690 }
694 691
695 return (rval); 692 return (rval);
@@ -757,13 +754,14 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
757 sizeof(struct sns_cmd_pkt)); 754 sizeof(struct sns_cmd_pkt));
758 if (rval != QLA_SUCCESS) { 755 if (rval != QLA_SUCCESS) {
759 /*EMPTY*/ 756 /*EMPTY*/
760 DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n", 757 ql_dbg(ql_dbg_disc, vha, 0x205f,
761 vha->host_no, rval)); 758 "GA_NXT Send SNS failed (%d).\n", rval);
762 } else if (sns_cmd->p.gan_data[8] != 0x80 || 759 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
763 sns_cmd->p.gan_data[9] != 0x02) { 760 sns_cmd->p.gan_data[9] != 0x02) {
764 DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, " 761 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
765 "ga_nxt_rsp:\n", vha->host_no)); 762 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
766 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16)); 763 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
764 sns_cmd->p.gan_data, 16);
767 rval = QLA_FUNCTION_FAILED; 765 rval = QLA_FUNCTION_FAILED;
768 } else { 766 } else {
769 /* Populate fc_port_t entry. */ 767 /* Populate fc_port_t entry. */
@@ -778,11 +776,10 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
778 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE) 776 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
779 fcport->d_id.b.domain = 0xf0; 777 fcport->d_id.b.domain = 0xf0;
780 778
781 DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " 779 ql_dbg(ql_dbg_disc, vha, 0x2061,
782 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 780 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
783 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 781 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
784 "portid=%02x%02x%02x.\n", 782 "port_id=%02x%02x%02x.\n",
785 vha->host_no,
786 fcport->node_name[0], fcport->node_name[1], 783 fcport->node_name[0], fcport->node_name[1],
787 fcport->node_name[2], fcport->node_name[3], 784 fcport->node_name[2], fcport->node_name[3],
788 fcport->node_name[4], fcport->node_name[5], 785 fcport->node_name[4], fcport->node_name[5],
@@ -792,7 +789,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
792 fcport->port_name[4], fcport->port_name[5], 789 fcport->port_name[4], fcport->port_name[5],
793 fcport->port_name[6], fcport->port_name[7], 790 fcport->port_name[6], fcport->port_name[7],
794 fcport->d_id.b.domain, fcport->d_id.b.area, 791 fcport->d_id.b.domain, fcport->d_id.b.area,
795 fcport->d_id.b.al_pa)); 792 fcport->d_id.b.al_pa);
796 } 793 }
797 794
798 return (rval); 795 return (rval);
@@ -831,13 +828,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
831 sizeof(struct sns_cmd_pkt)); 828 sizeof(struct sns_cmd_pkt));
832 if (rval != QLA_SUCCESS) { 829 if (rval != QLA_SUCCESS) {
833 /*EMPTY*/ 830 /*EMPTY*/
834 DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n", 831 ql_dbg(ql_dbg_disc, vha, 0x206d,
835 vha->host_no, rval)); 832 "GID_PT Send SNS failed (%d).\n", rval);
836 } else if (sns_cmd->p.gid_data[8] != 0x80 || 833 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
837 sns_cmd->p.gid_data[9] != 0x02) { 834 sns_cmd->p.gid_data[9] != 0x02) {
838 DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, " 835 ql_dbg(ql_dbg_disc, vha, 0x202f,
839 "gid_rsp:\n", vha->host_no)); 836 "GID_PT failed, rejected request, gid_rsp:\n");
840 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16)); 837 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
838 sns_cmd->p.gid_data, 16);
841 rval = QLA_FUNCTION_FAILED; 839 rval = QLA_FUNCTION_FAILED;
842 } else { 840 } else {
843 /* Set port IDs in switch info list. */ 841 /* Set port IDs in switch info list. */
@@ -900,13 +898,14 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
900 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 898 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
901 if (rval != QLA_SUCCESS) { 899 if (rval != QLA_SUCCESS) {
902 /*EMPTY*/ 900 /*EMPTY*/
903 DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed " 901 ql_dbg(ql_dbg_disc, vha, 0x2032,
904 "(%d).\n", vha->host_no, rval)); 902 "GPN_ID Send SNS failed (%d).\n", rval);
905 } else if (sns_cmd->p.gpn_data[8] != 0x80 || 903 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
906 sns_cmd->p.gpn_data[9] != 0x02) { 904 sns_cmd->p.gpn_data[9] != 0x02) {
907 DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected " 905 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
908 "request, gpn_rsp:\n", vha->host_no)); 906 "GPN_ID failed, rejected request, gpn_rsp:\n");
909 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16)); 907 ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
908 sns_cmd->p.gpn_data, 16);
910 rval = QLA_FUNCTION_FAILED; 909 rval = QLA_FUNCTION_FAILED;
911 } else { 910 } else {
912 /* Save portname */ 911 /* Save portname */
@@ -955,24 +954,24 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
955 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 954 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
956 if (rval != QLA_SUCCESS) { 955 if (rval != QLA_SUCCESS) {
957 /*EMPTY*/ 956 /*EMPTY*/
958 DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed " 957 ql_dbg(ql_dbg_disc, vha, 0x203f,
959 "(%d).\n", vha->host_no, rval)); 958 "GNN_ID Send SNS failed (%d).\n", rval);
960 } else if (sns_cmd->p.gnn_data[8] != 0x80 || 959 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
961 sns_cmd->p.gnn_data[9] != 0x02) { 960 sns_cmd->p.gnn_data[9] != 0x02) {
962 DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected " 961 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
963 "request, gnn_rsp:\n", vha->host_no)); 962 "GNN_ID failed, rejected request, gnn_rsp:\n");
964 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16)); 963 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
964 sns_cmd->p.gnn_data, 16);
965 rval = QLA_FUNCTION_FAILED; 965 rval = QLA_FUNCTION_FAILED;
966 } else { 966 } else {
967 /* Save nodename */ 967 /* Save nodename */
968 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16], 968 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
969 WWN_SIZE); 969 WWN_SIZE);
970 970
971 DEBUG2_3(printk("scsi(%ld): GID_PT entry - " 971 ql_dbg(ql_dbg_disc, vha, 0x206e,
972 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 972 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
973 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 973 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
974 "portid=%02x%02x%02x.\n", 974 "port_id=%02x%02x%02x.\n",
975 vha->host_no,
976 list[i].node_name[0], list[i].node_name[1], 975 list[i].node_name[0], list[i].node_name[1],
977 list[i].node_name[2], list[i].node_name[3], 976 list[i].node_name[2], list[i].node_name[3],
978 list[i].node_name[4], list[i].node_name[5], 977 list[i].node_name[4], list[i].node_name[5],
@@ -982,7 +981,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
982 list[i].port_name[4], list[i].port_name[5], 981 list[i].port_name[4], list[i].port_name[5],
983 list[i].port_name[6], list[i].port_name[7], 982 list[i].port_name[6], list[i].port_name[7],
984 list[i].d_id.b.domain, list[i].d_id.b.area, 983 list[i].d_id.b.domain, list[i].d_id.b.area,
985 list[i].d_id.b.al_pa)); 984 list[i].d_id.b.al_pa);
986 } 985 }
987 986
988 /* Last device exit. */ 987 /* Last device exit. */
@@ -1025,17 +1024,18 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1025 sizeof(struct sns_cmd_pkt)); 1024 sizeof(struct sns_cmd_pkt));
1026 if (rval != QLA_SUCCESS) { 1025 if (rval != QLA_SUCCESS) {
1027 /*EMPTY*/ 1026 /*EMPTY*/
1028 DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n", 1027 ql_dbg(ql_dbg_disc, vha, 0x2060,
1029 vha->host_no, rval)); 1028 "RFT_ID Send SNS failed (%d).\n", rval);
1030 } else if (sns_cmd->p.rft_data[8] != 0x80 || 1029 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1031 sns_cmd->p.rft_data[9] != 0x02) { 1030 sns_cmd->p.rft_data[9] != 0x02) {
1032 DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, " 1031 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1033 "rft_rsp:\n", vha->host_no)); 1032 "RFT_ID failed, rejected request rft_rsp:\n");
1034 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16)); 1033 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1034 sns_cmd->p.rft_data, 16);
1035 rval = QLA_FUNCTION_FAILED; 1035 rval = QLA_FUNCTION_FAILED;
1036 } else { 1036 } else {
1037 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 1037 ql_dbg(ql_dbg_disc, vha, 0x2073,
1038 vha->host_no)); 1038 "RFT_ID exiting normally.\n");
1039 } 1039 }
1040 1040
1041 return (rval); 1041 return (rval);
@@ -1081,17 +1081,18 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1081 sizeof(struct sns_cmd_pkt)); 1081 sizeof(struct sns_cmd_pkt));
1082 if (rval != QLA_SUCCESS) { 1082 if (rval != QLA_SUCCESS) {
1083 /*EMPTY*/ 1083 /*EMPTY*/
1084 DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n", 1084 ql_dbg(ql_dbg_disc, vha, 0x204a,
1085 vha->host_no, rval)); 1085 "RNN_ID Send SNS failed (%d).\n", rval);
1086 } else if (sns_cmd->p.rnn_data[8] != 0x80 || 1086 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1087 sns_cmd->p.rnn_data[9] != 0x02) { 1087 sns_cmd->p.rnn_data[9] != 0x02) {
1088 DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, " 1088 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1089 "rnn_rsp:\n", vha->host_no)); 1089 "RNN_ID failed, rejected request, rnn_rsp:\n");
1090 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16)); 1090 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1091 sns_cmd->p.rnn_data, 16);
1091 rval = QLA_FUNCTION_FAILED; 1092 rval = QLA_FUNCTION_FAILED;
1092 } else { 1093 } else {
1093 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 1094 ql_dbg(ql_dbg_disc, vha, 0x204c,
1094 vha->host_no)); 1095 "RNN_ID exiting normally.\n");
1095 } 1096 }
1096 1097
1097 return (rval); 1098 return (rval);
@@ -1116,10 +1117,10 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1116 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1117 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1117 mb, BIT_1|BIT_0); 1118 mb, BIT_1|BIT_0);
1118 if (mb[0] != MBS_COMMAND_COMPLETE) { 1119 if (mb[0] != MBS_COMMAND_COMPLETE) {
1119 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1120 ql_dbg(ql_dbg_disc, vha, 0x2024,
1120 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1121 "Failed management_server login: loopid=%x mb[0]=%x "
1121 __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1], 1122 "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1122 mb[2], mb[6], mb[7])); 1123 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
1123 ret = QLA_FUNCTION_FAILED; 1124 ret = QLA_FUNCTION_FAILED;
1124 } else 1125 } else
1125 vha->flags.management_server_logged_in = 1; 1126 vha->flags.management_server_logged_in = 1;
@@ -1292,11 +1293,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1292 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); 1293 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1293 size += 4 + WWN_SIZE; 1294 size += 4 + WWN_SIZE;
1294 1295
1295 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n", 1296 ql_dbg(ql_dbg_disc, vha, 0x2025,
1296 __func__, vha->host_no, 1297 "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
1297 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2], 1298 eiter->a.node_name[0], eiter->a.node_name[1],
1298 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5], 1299 eiter->a.node_name[2], eiter->a.node_name[3],
1299 eiter->a.node_name[6], eiter->a.node_name[7])); 1300 eiter->a.node_name[4], eiter->a.node_name[5],
1301 eiter->a.node_name[6], eiter->a.node_name[7]);
1300 1302
1301 /* Manufacturer. */ 1303 /* Manufacturer. */
1302 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1304 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1307,8 +1309,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1307 eiter->len = cpu_to_be16(4 + alen); 1309 eiter->len = cpu_to_be16(4 + alen);
1308 size += 4 + alen; 1310 size += 4 + alen;
1309 1311
1310 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no, 1312 ql_dbg(ql_dbg_disc, vha, 0x2026,
1311 eiter->a.manufacturer)); 1313 "Manufacturer = %s.\n", eiter->a.manufacturer);
1312 1314
1313 /* Serial number. */ 1315 /* Serial number. */
1314 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1316 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1320,8 +1322,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1320 eiter->len = cpu_to_be16(4 + alen); 1322 eiter->len = cpu_to_be16(4 + alen);
1321 size += 4 + alen; 1323 size += 4 + alen;
1322 1324
1323 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no, 1325 ql_dbg(ql_dbg_disc, vha, 0x2027,
1324 eiter->a.serial_num)); 1326 "Serial no. = %s.\n", eiter->a.serial_num);
1325 1327
1326 /* Model name. */ 1328 /* Model name. */
1327 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1329 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1332,8 +1334,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1332 eiter->len = cpu_to_be16(4 + alen); 1334 eiter->len = cpu_to_be16(4 + alen);
1333 size += 4 + alen; 1335 size += 4 + alen;
1334 1336
1335 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no, 1337 ql_dbg(ql_dbg_disc, vha, 0x2028,
1336 eiter->a.model)); 1338 "Model Name = %s.\n", eiter->a.model);
1337 1339
1338 /* Model description. */ 1340 /* Model description. */
1339 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1341 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1345,8 +1347,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1345 eiter->len = cpu_to_be16(4 + alen); 1347 eiter->len = cpu_to_be16(4 + alen);
1346 size += 4 + alen; 1348 size += 4 + alen;
1347 1349
1348 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no, 1350 ql_dbg(ql_dbg_disc, vha, 0x2029,
1349 eiter->a.model_desc)); 1351 "Model Desc = %s.\n", eiter->a.model_desc);
1350 1352
1351 /* Hardware version. */ 1353 /* Hardware version. */
1352 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1354 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1357,8 +1359,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1357 eiter->len = cpu_to_be16(4 + alen); 1359 eiter->len = cpu_to_be16(4 + alen);
1358 size += 4 + alen; 1360 size += 4 + alen;
1359 1361
1360 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no, 1362 ql_dbg(ql_dbg_disc, vha, 0x202a,
1361 eiter->a.hw_version)); 1363 "Hardware ver = %s.\n", eiter->a.hw_version);
1362 1364
1363 /* Driver version. */ 1365 /* Driver version. */
1364 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1366 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1369,8 +1371,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1369 eiter->len = cpu_to_be16(4 + alen); 1371 eiter->len = cpu_to_be16(4 + alen);
1370 size += 4 + alen; 1372 size += 4 + alen;
1371 1373
1372 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no, 1374 ql_dbg(ql_dbg_disc, vha, 0x202b,
1373 eiter->a.driver_version)); 1375 "Driver ver = %s.\n", eiter->a.driver_version);
1374 1376
1375 /* Option ROM version. */ 1377 /* Option ROM version. */
1376 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1378 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1381,8 +1383,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1381 eiter->len = cpu_to_be16(4 + alen); 1383 eiter->len = cpu_to_be16(4 + alen);
1382 size += 4 + alen; 1384 size += 4 + alen;
1383 1385
1384 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no, 1386 ql_dbg(ql_dbg_disc, vha , 0x202c,
1385 eiter->a.orom_version)); 1387 "Optrom vers = %s.\n", eiter->a.orom_version);
1386 1388
1387 /* Firmware version */ 1389 /* Firmware version */
1388 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1390 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1393,44 +1395,46 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1393 eiter->len = cpu_to_be16(4 + alen); 1395 eiter->len = cpu_to_be16(4 + alen);
1394 size += 4 + alen; 1396 size += 4 + alen;
1395 1397
1396 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no, 1398 ql_dbg(ql_dbg_disc, vha, 0x202d,
1397 eiter->a.fw_version)); 1399 "Firmware vers = %s.\n", eiter->a.fw_version);
1398 1400
1399 /* Update MS request size. */ 1401 /* Update MS request size. */
1400 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1402 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1401 1403
1402 DEBUG13(printk("%s(%ld): RHBA identifier=" 1404 ql_dbg(ql_dbg_disc, vha, 0x202e,
1403 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1405 "RHBA identifier = "
1404 vha->host_no, ct_req->req.rhba.hba_identifier[0], 1406 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
1407 ct_req->req.rhba.hba_identifier[0],
1405 ct_req->req.rhba.hba_identifier[1], 1408 ct_req->req.rhba.hba_identifier[1],
1406 ct_req->req.rhba.hba_identifier[2], 1409 ct_req->req.rhba.hba_identifier[2],
1407 ct_req->req.rhba.hba_identifier[3], 1410 ct_req->req.rhba.hba_identifier[3],
1408 ct_req->req.rhba.hba_identifier[4], 1411 ct_req->req.rhba.hba_identifier[4],
1409 ct_req->req.rhba.hba_identifier[5], 1412 ct_req->req.rhba.hba_identifier[5],
1410 ct_req->req.rhba.hba_identifier[6], 1413 ct_req->req.rhba.hba_identifier[6],
1411 ct_req->req.rhba.hba_identifier[7], size)); 1414 ct_req->req.rhba.hba_identifier[7], size);
1412 DEBUG13(qla2x00_dump_buffer(entries, size)); 1415 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1416 entries, size);
1413 1417
1414 /* Execute MS IOCB */ 1418 /* Execute MS IOCB */
1415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1419 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1416 sizeof(ms_iocb_entry_t)); 1420 sizeof(ms_iocb_entry_t));
1417 if (rval != QLA_SUCCESS) { 1421 if (rval != QLA_SUCCESS) {
1418 /*EMPTY*/ 1422 /*EMPTY*/
1419 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n", 1423 ql_dbg(ql_dbg_disc, vha, 0x2030,
1420 vha->host_no, rval)); 1424 "RHBA issue IOCB failed (%d).\n", rval);
1421 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") != 1425 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1422 QLA_SUCCESS) { 1426 QLA_SUCCESS) {
1423 rval = QLA_FUNCTION_FAILED; 1427 rval = QLA_FUNCTION_FAILED;
1424 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 1428 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1425 ct_rsp->header.explanation_code == 1429 ct_rsp->header.explanation_code ==
1426 CT_EXPL_ALREADY_REGISTERED) { 1430 CT_EXPL_ALREADY_REGISTERED) {
1427 DEBUG2_13(printk("%s(%ld): HBA already registered.\n", 1431 ql_dbg(ql_dbg_disc, vha, 0x2034,
1428 __func__, vha->host_no)); 1432 "HBA already registered.\n");
1429 rval = QLA_ALREADY_REGISTERED; 1433 rval = QLA_ALREADY_REGISTERED;
1430 } 1434 }
1431 } else { 1435 } else {
1432 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n", 1436 ql_dbg(ql_dbg_disc, vha, 0x2035,
1433 vha->host_no)); 1437 "RHBA exiting normally.\n");
1434 } 1438 }
1435 1439
1436 return rval; 1440 return rval;
@@ -1464,26 +1468,26 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
1464 /* Prepare FDMI command arguments -- portname. */ 1468 /* Prepare FDMI command arguments -- portname. */
1465 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); 1469 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
1466 1470
1467 DEBUG13(printk("%s(%ld): DHBA portname=" 1471 ql_dbg(ql_dbg_disc, vha, 0x2036,
1468 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no, 1472 "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
1469 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1], 1473 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
1470 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3], 1474 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
1471 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5], 1475 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
1472 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7])); 1476 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
1473 1477
1474 /* Execute MS IOCB */ 1478 /* Execute MS IOCB */
1475 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1479 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1476 sizeof(ms_iocb_entry_t)); 1480 sizeof(ms_iocb_entry_t));
1477 if (rval != QLA_SUCCESS) { 1481 if (rval != QLA_SUCCESS) {
1478 /*EMPTY*/ 1482 /*EMPTY*/
1479 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n", 1483 ql_dbg(ql_dbg_disc, vha, 0x2037,
1480 vha->host_no, rval)); 1484 "DHBA issue IOCB failed (%d).\n", rval);
1481 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != 1485 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
1482 QLA_SUCCESS) { 1486 QLA_SUCCESS) {
1483 rval = QLA_FUNCTION_FAILED; 1487 rval = QLA_FUNCTION_FAILED;
1484 } else { 1488 } else {
1485 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n", 1489 ql_dbg(ql_dbg_disc, vha, 0x2038,
1486 vha->host_no)); 1490 "DHBA exiting normally.\n");
1487 } 1491 }
1488 1492
1489 return rval; 1493 return rval;
@@ -1534,9 +1538,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1534 eiter->a.fc4_types[2] = 0x01; 1538 eiter->a.fc4_types[2] = 0x01;
1535 size += 4 + 32; 1539 size += 4 + 32;
1536 1540
1537 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, 1541 ql_dbg(ql_dbg_disc, vha, 0x2039,
1538 vha->host_no, eiter->a.fc4_types[2], 1542 "FC4_TYPES=%02x %02x.\n",
1539 eiter->a.fc4_types[1])); 1543 eiter->a.fc4_types[2],
1544 eiter->a.fc4_types[1]);
1540 1545
1541 /* Supported speed. */ 1546 /* Supported speed. */
1542 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1547 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1561,8 +1566,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1561 FDMI_PORT_SPEED_1GB); 1566 FDMI_PORT_SPEED_1GB);
1562 size += 4 + 4; 1567 size += 4 + 4;
1563 1568
1564 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no, 1569 ql_dbg(ql_dbg_disc, vha, 0x203a,
1565 eiter->a.sup_speed)); 1570 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1566 1571
1567 /* Current speed. */ 1572 /* Current speed. */
1568 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1573 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1596,8 +1601,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1596 } 1601 }
1597 size += 4 + 4; 1602 size += 4 + 4;
1598 1603
1599 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no, 1604 ql_dbg(ql_dbg_disc, vha, 0x203b,
1600 eiter->a.cur_speed)); 1605 "Current_Speed=%x.\n", eiter->a.cur_speed);
1601 1606
1602 /* Max frame size. */ 1607 /* Max frame size. */
1603 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1608 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1609,8 +1614,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1609 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1614 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1610 size += 4 + 4; 1615 size += 4 + 4;
1611 1616
1612 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no, 1617 ql_dbg(ql_dbg_disc, vha, 0x203c,
1613 eiter->a.max_frame_size)); 1618 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1614 1619
1615 /* OS device name. */ 1620 /* OS device name. */
1616 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1621 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1621,8 +1626,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1621 eiter->len = cpu_to_be16(4 + alen); 1626 eiter->len = cpu_to_be16(4 + alen);
1622 size += 4 + alen; 1627 size += 4 + alen;
1623 1628
1624 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no, 1629 ql_dbg(ql_dbg_disc, vha, 0x204b,
1625 eiter->a.os_dev_name)); 1630 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1626 1631
1627 /* Hostname. */ 1632 /* Hostname. */
1628 if (strlen(fc_host_system_hostname(vha->host))) { 1633 if (strlen(fc_host_system_hostname(vha->host))) {
@@ -1637,35 +1642,36 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1637 eiter->len = cpu_to_be16(4 + alen); 1642 eiter->len = cpu_to_be16(4 + alen);
1638 size += 4 + alen; 1643 size += 4 + alen;
1639 1644
1640 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__, 1645 ql_dbg(ql_dbg_disc, vha, 0x203d,
1641 vha->host_no, eiter->a.host_name)); 1646 "HostName=%s.\n", eiter->a.host_name);
1642 } 1647 }
1643 1648
1644 /* Update MS request size. */ 1649 /* Update MS request size. */
1645 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1650 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1646 1651
1647 DEBUG13(printk("%s(%ld): RPA portname=" 1652 ql_dbg(ql_dbg_disc, vha, 0x203e,
1648 	    "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1653 	    "RPA portname= %02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
1649 vha->host_no, ct_req->req.rpa.port_name[0], 1654 ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
1650 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2], 1655 ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
1651 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4], 1656 ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
1652 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6], 1657 ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
1653 ct_req->req.rpa.port_name[7], size)); 1658 size);
1654 DEBUG13(qla2x00_dump_buffer(entries, size)); 1659 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1660 entries, size);
1655 1661
1656 /* Execute MS IOCB */ 1662 /* Execute MS IOCB */
1657 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1663 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1658 sizeof(ms_iocb_entry_t)); 1664 sizeof(ms_iocb_entry_t));
1659 if (rval != QLA_SUCCESS) { 1665 if (rval != QLA_SUCCESS) {
1660 /*EMPTY*/ 1666 /*EMPTY*/
1661 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n", 1667 ql_dbg(ql_dbg_disc, vha, 0x2040,
1662 vha->host_no, rval)); 1668 "RPA issue IOCB failed (%d).\n", rval);
1663 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") != 1669 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1664 QLA_SUCCESS) { 1670 QLA_SUCCESS) {
1665 rval = QLA_FUNCTION_FAILED; 1671 rval = QLA_FUNCTION_FAILED;
1666 } else { 1672 } else {
1667 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n", 1673 ql_dbg(ql_dbg_disc, vha, 0x2041,
1668 		    vha->host_no)); 1674 		    "RPA exiting normally.\n");
1669 } 1675 }
1670 1676
1671 return rval; 1677 return rval;
@@ -1749,8 +1755,8 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1749 sizeof(ms_iocb_entry_t)); 1755 sizeof(ms_iocb_entry_t));
1750 if (rval != QLA_SUCCESS) { 1756 if (rval != QLA_SUCCESS) {
1751 /*EMPTY*/ 1757 /*EMPTY*/
1752 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB " 1758 ql_dbg(ql_dbg_disc, vha, 0x2023,
1753 "failed (%d).\n", vha->host_no, rval)); 1759 "GFPN_ID issue IOCB failed (%d).\n", rval);
1754 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1760 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1755 "GFPN_ID") != QLA_SUCCESS) { 1761 "GFPN_ID") != QLA_SUCCESS) {
1756 rval = QLA_FUNCTION_FAILED; 1762 rval = QLA_FUNCTION_FAILED;
@@ -1860,8 +1866,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1860 sizeof(ms_iocb_entry_t)); 1866 sizeof(ms_iocb_entry_t));
1861 if (rval != QLA_SUCCESS) { 1867 if (rval != QLA_SUCCESS) {
1862 /*EMPTY*/ 1868 /*EMPTY*/
1863 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB " 1869 ql_dbg(ql_dbg_disc, vha, 0x2059,
1864 "failed (%d).\n", vha->host_no, rval)); 1870 "GPSC issue IOCB failed (%d).\n", rval);
1865 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1871 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1866 "GPSC")) != QLA_SUCCESS) { 1872 "GPSC")) != QLA_SUCCESS) {
1867 /* FM command unsupported? */ 1873 /* FM command unsupported? */
@@ -1870,9 +1876,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1870 CT_REASON_INVALID_COMMAND_CODE || 1876 CT_REASON_INVALID_COMMAND_CODE ||
1871 ct_rsp->header.reason_code == 1877 ct_rsp->header.reason_code ==
1872 CT_REASON_COMMAND_UNSUPPORTED)) { 1878 CT_REASON_COMMAND_UNSUPPORTED)) {
1873 DEBUG2(printk("scsi(%ld): GPSC command " 1879 ql_dbg(ql_dbg_disc, vha, 0x205a,
1874 "unsupported, disabling query...\n", 1880 "GPSC command unsupported, disabling "
1875 vha->host_no)); 1881 "query.\n");
1876 ha->flags.gpsc_supported = 0; 1882 ha->flags.gpsc_supported = 0;
1877 rval = QLA_FUNCTION_FAILED; 1883 rval = QLA_FUNCTION_FAILED;
1878 break; 1884 break;
@@ -1898,9 +1904,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1898 break; 1904 break;
1899 } 1905 }
1900 1906
1901 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - " 1907 ql_dbg(ql_dbg_disc, vha, 0x205b,
1902 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " 1908 "GPSC ext entry - fpn "
1903 "speed=%04x.\n", vha->host_no, 1909 "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
1910 "speed=%04x.\n",
1904 list[i].fabric_port_name[0], 1911 list[i].fabric_port_name[0],
1905 list[i].fabric_port_name[1], 1912 list[i].fabric_port_name[1],
1906 list[i].fabric_port_name[2], 1913 list[i].fabric_port_name[2],
@@ -1910,7 +1917,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1910 list[i].fabric_port_name[6], 1917 list[i].fabric_port_name[6],
1911 list[i].fabric_port_name[7], 1918 list[i].fabric_port_name[7],
1912 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 1919 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
1913 be16_to_cpu(ct_rsp->rsp.gpsc.speed))); 1920 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
1914 } 1921 }
1915 1922
1916 /* Last device exit. */ 1923 /* Last device exit. */
@@ -1968,14 +1975,12 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
1968 sizeof(ms_iocb_entry_t)); 1975 sizeof(ms_iocb_entry_t));
1969 1976
1970 if (rval != QLA_SUCCESS) { 1977 if (rval != QLA_SUCCESS) {
1971 DEBUG2_3(printk(KERN_INFO 1978 ql_dbg(ql_dbg_disc, vha, 0x205c,
1972 "scsi(%ld): GFF_ID issue IOCB failed " 1979 "GFF_ID issue IOCB failed (%d).\n", rval);
1973 "(%d).\n", vha->host_no, rval));
1974 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1980 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1975 "GFF_ID") != QLA_SUCCESS) { 1981 "GFF_ID") != QLA_SUCCESS) {
1976 DEBUG2_3(printk(KERN_INFO 1982 ql_dbg(ql_dbg_disc, vha, 0x205d,
1977 "scsi(%ld): GFF_ID IOCB status had a " 1983 "GFF_ID IOCB status had a failure status code.\n");
1978 "failure status code\n", vha->host_no));
1979 } else { 1984 } else {
1980 fcp_scsi_features = 1985 fcp_scsi_features =
1981 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 1986 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
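The qla_gs.c hunks above (and the qla_init.c hunks that follow) all apply the same mechanical conversion: compile-time DEBUG*()/printk() and qla_printk() wrappers are replaced by the driver's ql_dbg()/ql_log() helpers, which take a debug-area or severity mask, the scsi_qla_host_t pointer, and a unique hexadecimal message id. A minimal before/after sketch of the pattern, lifted from the RHBA error path above (the helpers are assumed to prefix the host identification that the old messages formatted by hand via vha->host_no):

	/* Old style: compiled in only when DEBUG2_3 is defined; the host
	 * number is embedded manually in the format string. */
	DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
	    vha->host_no, rval));

	/* New style: discovery debug mask, host pointer, message id 0x2030;
	 * only the event-specific text remains in the format string. */
	ql_dbg(ql_dbg_disc, vha, 0x2030,
	    "RHBA issue IOCB failed (%d).\n", rval);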
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3d0384506929..def694271bf7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -153,11 +153,10 @@ qla2x00_async_iocb_timeout(srb_t *sp)
153 fc_port_t *fcport = sp->fcport; 153 fc_port_t *fcport = sp->fcport;
154 struct srb_ctx *ctx = sp->ctx; 154 struct srb_ctx *ctx = sp->ctx;
155 155
156 DEBUG2(printk(KERN_WARNING 156 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
157 "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n", 157 "Async-%s timeout - portid=%02x%02x%02x.\n",
158 fcport->vha->host_no, sp->handle, 158 ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
159 ctx->name, fcport->d_id.b.domain, 159 fcport->d_id.b.al_pa);
160 fcport->d_id.b.area, fcport->d_id.b.al_pa));
161 160
162 fcport->flags &= ~FCF_ASYNC_SENT; 161 fcport->flags &= ~FCF_ASYNC_SENT;
163 if (ctx->type == SRB_LOGIN_CMD) { 162 if (ctx->type == SRB_LOGIN_CMD) {
@@ -211,11 +210,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
211 if (rval != QLA_SUCCESS) 210 if (rval != QLA_SUCCESS)
212 goto done_free_sp; 211 goto done_free_sp;
213 212
214 DEBUG2(printk(KERN_DEBUG 213 ql_dbg(ql_dbg_disc, vha, 0x2072,
215 "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x " 214 "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
216 "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id, 215 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
217 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 216 fcport->d_id.b.al_pa, fcport->login_retry);
218 fcport->login_retry));
219 return rval; 217 return rval;
220 218
221done_free_sp: 219done_free_sp:
@@ -259,10 +257,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
259 if (rval != QLA_SUCCESS) 257 if (rval != QLA_SUCCESS)
260 goto done_free_sp; 258 goto done_free_sp;
261 259
262 DEBUG2(printk(KERN_DEBUG 260 ql_dbg(ql_dbg_disc, vha, 0x2070,
263 "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n", 261 "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
264 fcport->vha->host_no, sp->handle, fcport->loop_id, 262 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
265 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 263 fcport->d_id.b.al_pa);
266 return rval; 264 return rval;
267 265
268done_free_sp: 266done_free_sp:
@@ -309,11 +307,10 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
309 if (rval != QLA_SUCCESS) 307 if (rval != QLA_SUCCESS)
310 goto done_free_sp; 308 goto done_free_sp;
311 309
312 DEBUG2(printk(KERN_DEBUG 310 ql_dbg(ql_dbg_disc, vha, 0x206f,
313 "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n", 311 "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
314 fcport->vha->host_no, sp->handle, fcport->loop_id, 312 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
315 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 313 fcport->d_id.b.al_pa);
316
317 return rval; 314 return rval;
318 315
319done_free_sp: 316done_free_sp:
@@ -362,11 +359,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
362 if (rval != QLA_SUCCESS) 359 if (rval != QLA_SUCCESS)
363 goto done_free_sp; 360 goto done_free_sp;
364 361
365 DEBUG2(printk(KERN_DEBUG 362 ql_dbg(ql_dbg_taskm, vha, 0x802f,
366 "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n", 363 "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
367 fcport->vha->host_no, sp->handle, fcport->loop_id, 364 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
368 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 365 fcport->d_id.b.al_pa);
369
370 return rval; 366 return rval;
371 367
372done_free_sp: 368done_free_sp:
@@ -471,9 +467,8 @@ qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
471 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 467 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
472 468
473 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { 469 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
474 DEBUG2_3_11(printk(KERN_WARNING 470 ql_dbg(ql_dbg_taskm, vha, 0x8030,
475 "%s(%ld): TM IOCB failed (%x).\n", 471 "TM IOCB failed (%x).\n", rval);
476 __func__, vha->host_no, rval));
477 } 472 }
478 473
479 return; 474 return;
@@ -519,11 +514,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
519 set_bit(0, ha->req_qid_map); 514 set_bit(0, ha->req_qid_map);
520 set_bit(0, ha->rsp_qid_map); 515 set_bit(0, ha->rsp_qid_map);
521 516
522 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 517 ql_log(ql_log_info, vha, 0x0040,
518 "Configuring PCI space...\n");
523 rval = ha->isp_ops->pci_config(vha); 519 rval = ha->isp_ops->pci_config(vha);
524 if (rval) { 520 if (rval) {
525 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", 521 ql_log(ql_log_warn, vha, 0x0044,
526 vha->host_no)); 522 "Unable to configure PCI space.\n");
527 return (rval); 523 return (rval);
528 } 524 }
529 525
@@ -531,20 +527,21 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
531 527
532 rval = qla2xxx_get_flash_info(vha); 528 rval = qla2xxx_get_flash_info(vha);
533 if (rval) { 529 if (rval) {
534 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n", 530 ql_log(ql_log_fatal, vha, 0x004f,
535 vha->host_no)); 531 "Unable to validate FLASH data.\n");
536 return (rval); 532 return (rval);
537 } 533 }
538 534
539 ha->isp_ops->get_flash_version(vha, req->ring); 535 ha->isp_ops->get_flash_version(vha, req->ring);
540 536 ql_log(ql_log_info, vha, 0x0061,
541 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 537 "Configure NVRAM parameters...\n");
542 538
543 ha->isp_ops->nvram_config(vha); 539 ha->isp_ops->nvram_config(vha);
544 540
545 if (ha->flags.disable_serdes) { 541 if (ha->flags.disable_serdes) {
546 /* Mask HBA via NVRAM settings? */ 542 /* Mask HBA via NVRAM settings? */
547 qla_printk(KERN_INFO, ha, "Masking HBA WWPN " 543 ql_log(ql_log_info, vha, 0x0077,
544 "Masking HBA WWPN "
548 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", 545 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
549 vha->port_name[0], vha->port_name[1], 546 vha->port_name[0], vha->port_name[1],
550 vha->port_name[2], vha->port_name[3], 547 vha->port_name[2], vha->port_name[3],
@@ -553,7 +550,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
553 return QLA_FUNCTION_FAILED; 550 return QLA_FUNCTION_FAILED;
554 } 551 }
555 552
556 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 553 ql_log(ql_log_info, vha, 0x0078,
554 "Verifying loaded RISC code...\n");
557 555
558 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { 556 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
559 rval = ha->isp_ops->chip_diag(vha); 557 rval = ha->isp_ops->chip_diag(vha);
@@ -567,7 +565,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
567 if (IS_QLA84XX(ha)) { 565 if (IS_QLA84XX(ha)) {
568 ha->cs84xx = qla84xx_get_chip(vha); 566 ha->cs84xx = qla84xx_get_chip(vha);
569 if (!ha->cs84xx) { 567 if (!ha->cs84xx) {
570 qla_printk(KERN_ERR, ha, 568 ql_log(ql_log_warn, vha, 0x00d0,
571 "Unable to configure ISP84XX.\n"); 569 "Unable to configure ISP84XX.\n");
572 return QLA_FUNCTION_FAILED; 570 return QLA_FUNCTION_FAILED;
573 } 571 }
@@ -579,8 +577,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
579 /* Issue verify 84xx FW IOCB to complete 84xx initialization */ 577 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
580 rval = qla84xx_init_chip(vha); 578 rval = qla84xx_init_chip(vha);
581 if (rval != QLA_SUCCESS) { 579 if (rval != QLA_SUCCESS) {
582 qla_printk(KERN_ERR, ha, 580 ql_log(ql_log_warn, vha, 0x00d4,
583 "Unable to initialize ISP84XX.\n"); 581 "Unable to initialize ISP84XX.\n");
584 qla84xx_put_chip(vha); 582 qla84xx_put_chip(vha);
585 } 583 }
586 } 584 }
@@ -797,9 +795,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
797 rval = QLA_FUNCTION_FAILED; 795 rval = QLA_FUNCTION_FAILED;
798 796
799 if (ha->flags.disable_risc_code_load) { 797 if (ha->flags.disable_risc_code_load) {
800 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n", 798 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
801 vha->host_no));
802 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
803 799
804 /* Verify checksum of loaded RISC code. */ 800 /* Verify checksum of loaded RISC code. */
805 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); 801 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
@@ -810,10 +806,9 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
810 } 806 }
811 } 807 }
812 808
813 if (rval) { 809 if (rval)
814 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n", 810 ql_dbg(ql_dbg_init, vha, 0x007a,
815 vha->host_no)); 811 "**** Load RISC code ****.\n");
816 }
817 812
818 return (rval); 813 return (rval);
819} 814}
@@ -1105,8 +1100,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1105 /* Assume a failed state */ 1100 /* Assume a failed state */
1106 rval = QLA_FUNCTION_FAILED; 1101 rval = QLA_FUNCTION_FAILED;
1107 1102
1108 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", 1103 ql_dbg(ql_dbg_init, vha, 0x007b,
1109 vha->host_no, (u_long)&reg->flash_address)); 1104 "Testing device at %lx.\n", (u_long)&reg->flash_address);
1110 1105
1111 spin_lock_irqsave(&ha->hardware_lock, flags); 1106 spin_lock_irqsave(&ha->hardware_lock, flags);
1112 1107
@@ -1128,8 +1123,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1128 if (!cnt) 1123 if (!cnt)
1129 goto chip_diag_failed; 1124 goto chip_diag_failed;
1130 1125
1131 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 1126 ql_dbg(ql_dbg_init, vha, 0x007c,
1132 vha->host_no)); 1127 "Reset register cleared by chip reset.\n");
1133 1128
1134 /* Reset RISC processor. */ 1129 /* Reset RISC processor. */
1135 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 1130 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -1150,7 +1145,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1150 goto chip_diag_failed; 1145 goto chip_diag_failed;
1151 1146
1152 /* Check product ID of chip */ 1147 /* Check product ID of chip */
1153 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no)); 1148 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
1154 1149
1155 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 1150 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1156 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 1151 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -1158,8 +1153,9 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1158 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); 1153 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1159 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || 1154 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1160 mb[3] != PROD_ID_3) { 1155 mb[3] != PROD_ID_3) {
1161 qla_printk(KERN_WARNING, ha, 1156 ql_log(ql_log_warn, vha, 0x0062,
1162 "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]); 1157 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
1158 mb[1], mb[2], mb[3]);
1163 1159
1164 goto chip_diag_failed; 1160 goto chip_diag_failed;
1165 } 1161 }
@@ -1178,8 +1174,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1178 if (IS_QLA2200(ha) && 1174 if (IS_QLA2200(ha) &&
1179 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 1175 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1180 /* Limit firmware transfer size with a 2200A */ 1176 /* Limit firmware transfer size with a 2200A */
1181 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", 1177 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1182 vha->host_no));
1183 1178
1184 ha->device_type |= DT_ISP2200A; 1179 ha->device_type |= DT_ISP2200A;
1185 ha->fw_transfer_size = 128; 1180 ha->fw_transfer_size = 128;
@@ -1188,24 +1183,20 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1188 /* Wrap Incoming Mailboxes Test. */ 1183 /* Wrap Incoming Mailboxes Test. */
1189 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1184 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1190 1185
1191 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no)); 1186 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
1192 rval = qla2x00_mbx_reg_test(vha); 1187 rval = qla2x00_mbx_reg_test(vha);
1193 if (rval) { 1188 if (rval)
1194 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 1189 ql_log(ql_log_warn, vha, 0x0080,
1195 vha->host_no)); 1190 "Failed mailbox send register test.\n");
1196 qla_printk(KERN_WARNING, ha, 1191 else
1197 "Failed mailbox send register test\n");
1198 }
1199 else {
1200 /* Flag a successful rval */ 1192 /* Flag a successful rval */
1201 rval = QLA_SUCCESS; 1193 rval = QLA_SUCCESS;
1202 }
1203 spin_lock_irqsave(&ha->hardware_lock, flags); 1194 spin_lock_irqsave(&ha->hardware_lock, flags);
1204 1195
1205chip_diag_failed: 1196chip_diag_failed:
1206 if (rval) 1197 if (rval)
1207 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " 1198 ql_log(ql_log_info, vha, 0x0081,
1208 "****\n", vha->host_no)); 1199 "Chip diagnostics **** FAILED ****.\n");
1209 1200
1210 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1201 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1211 1202
@@ -1232,10 +1223,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
1232 1223
1233 rval = qla2x00_mbx_reg_test(vha); 1224 rval = qla2x00_mbx_reg_test(vha);
1234 if (rval) { 1225 if (rval) {
1235 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 1226 ql_log(ql_log_warn, vha, 0x0082,
1236 vha->host_no)); 1227 "Failed mailbox send register test.\n");
1237 qla_printk(KERN_WARNING, ha,
1238 "Failed mailbox send register test\n");
1239 } else { 1228 } else {
1240 /* Flag a successful rval */ 1229 /* Flag a successful rval */
1241 rval = QLA_SUCCESS; 1230 rval = QLA_SUCCESS;
@@ -1257,8 +1246,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1257 struct rsp_que *rsp = ha->rsp_q_map[0]; 1246 struct rsp_que *rsp = ha->rsp_q_map[0];
1258 1247
1259 if (ha->fw_dump) { 1248 if (ha->fw_dump) {
1260 qla_printk(KERN_WARNING, ha, 1249 ql_dbg(ql_dbg_init, vha, 0x00bd,
1261 "Firmware dump previously allocated.\n"); 1250 "Firmware dump already allocated.\n");
1262 return; 1251 return;
1263 } 1252 }
1264 1253
@@ -1288,8 +1277,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1288 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 1277 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1289 GFP_KERNEL); 1278 GFP_KERNEL);
1290 if (!tc) { 1279 if (!tc) {
1291 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1280 ql_log(ql_log_warn, vha, 0x00be,
1292 "(%d KB) for FCE.\n", FCE_SIZE / 1024); 1281 "Unable to allocate (%d KB) for FCE.\n",
1282 FCE_SIZE / 1024);
1293 goto try_eft; 1283 goto try_eft;
1294 } 1284 }
1295 1285
@@ -1297,16 +1287,15 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1297 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, 1287 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1298 ha->fce_mb, &ha->fce_bufs); 1288 ha->fce_mb, &ha->fce_bufs);
1299 if (rval) { 1289 if (rval) {
1300 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1290 ql_log(ql_log_warn, vha, 0x00bf,
1301 "FCE (%d).\n", rval); 1291 "Unable to initialize FCE (%d).\n", rval);
1302 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 1292 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1303 tc_dma); 1293 tc_dma);
1304 ha->flags.fce_enabled = 0; 1294 ha->flags.fce_enabled = 0;
1305 goto try_eft; 1295 goto try_eft;
1306 } 1296 }
1307 1297 ql_log(ql_log_info, vha, 0x00c0,
1308 	qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", 1298 		    "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
1309 FCE_SIZE / 1024);
1310 1299
1311 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; 1300 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1312 ha->flags.fce_enabled = 1; 1301 ha->flags.fce_enabled = 1;
@@ -1317,23 +1306,23 @@ try_eft:
1317 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 1306 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1318 GFP_KERNEL); 1307 GFP_KERNEL);
1319 if (!tc) { 1308 if (!tc) {
1320 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1309 ql_log(ql_log_warn, vha, 0x00c1,
1321 "(%d KB) for EFT.\n", EFT_SIZE / 1024); 1310 "Unable to allocate (%d KB) for EFT.\n",
1311 EFT_SIZE / 1024);
1322 goto cont_alloc; 1312 goto cont_alloc;
1323 } 1313 }
1324 1314
1325 memset(tc, 0, EFT_SIZE); 1315 memset(tc, 0, EFT_SIZE);
1326 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); 1316 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
1327 if (rval) { 1317 if (rval) {
1328 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1318 ql_log(ql_log_warn, vha, 0x00c2,
1329 "EFT (%d).\n", rval); 1319 "Unable to initialize EFT (%d).\n", rval);
1330 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, 1320 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1331 tc_dma); 1321 tc_dma);
1332 goto cont_alloc; 1322 goto cont_alloc;
1333 } 1323 }
1334 1324 ql_log(ql_log_info, vha, 0x00c3,
1335 	qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n", 1325 		    "Allocated (%d KB) for EFT...\n", EFT_SIZE / 1024);
1336 EFT_SIZE / 1024);
1337 1326
1338 eft_size = EFT_SIZE; 1327 eft_size = EFT_SIZE;
1339 ha->eft_dma = tc_dma; 1328 ha->eft_dma = tc_dma;
@@ -1350,8 +1339,9 @@ cont_alloc:
1350 1339
1351 ha->fw_dump = vmalloc(dump_size); 1340 ha->fw_dump = vmalloc(dump_size);
1352 if (!ha->fw_dump) { 1341 if (!ha->fw_dump) {
1353 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " 1342 ql_log(ql_log_warn, vha, 0x00c4,
1354 "firmware dump!!!\n", dump_size / 1024); 1343 "Unable to allocate (%d KB) for firmware dump.\n",
1344 dump_size / 1024);
1355 1345
1356 if (ha->fce) { 1346 if (ha->fce) {
1357 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 1347 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@@ -1368,8 +1358,8 @@ cont_alloc:
1368 } 1358 }
1369 return; 1359 return;
1370 } 1360 }
1371 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", 1361 ql_log(ql_log_info, vha, 0x00c5,
1372 dump_size / 1024); 1362 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
1373 1363
1374 ha->fw_dump_len = dump_size; 1364 ha->fw_dump_len = dump_size;
1375 ha->fw_dump->signature[0] = 'Q'; 1365 ha->fw_dump->signature[0] = 'Q';
@@ -1398,23 +1388,21 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
1398 int rval; 1388 int rval;
1399 uint16_t dc; 1389 uint16_t dc;
1400 uint32_t dw; 1390 uint32_t dw;
1401 struct qla_hw_data *ha = vha->hw;
1402 1391
1403 if (!IS_QLA81XX(vha->hw)) 1392 if (!IS_QLA81XX(vha->hw))
1404 return QLA_SUCCESS; 1393 return QLA_SUCCESS;
1405 1394
1406 rval = qla2x00_write_ram_word(vha, 0x7c00, 1); 1395 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1407 if (rval != QLA_SUCCESS) { 1396 if (rval != QLA_SUCCESS) {
1408 DEBUG2(qla_printk(KERN_WARNING, ha, 1397 ql_log(ql_log_warn, vha, 0x0105,
1409 "Sync-MPI: Unable to acquire semaphore.\n")); 1398 "Unable to acquire semaphore.\n");
1410 goto done; 1399 goto done;
1411 } 1400 }
1412 1401
1413 pci_read_config_word(vha->hw->pdev, 0x54, &dc); 1402 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1414 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); 1403 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1415 if (rval != QLA_SUCCESS) { 1404 if (rval != QLA_SUCCESS) {
1416 DEBUG2(qla_printk(KERN_WARNING, ha, 1405 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
1417 "Sync-MPI: Unable to read sync.\n"));
1418 goto done_release; 1406 goto done_release;
1419 } 1407 }
1420 1408
@@ -1426,15 +1414,14 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
1426 dw |= dc; 1414 dw |= dc;
1427 rval = qla2x00_write_ram_word(vha, 0x7a15, dw); 1415 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1428 if (rval != QLA_SUCCESS) { 1416 if (rval != QLA_SUCCESS) {
1429 DEBUG2(qla_printk(KERN_WARNING, ha, 1417 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
1430 "Sync-MPI: Unable to gain sync.\n"));
1431 } 1418 }
1432 1419
1433done_release: 1420done_release:
1434 rval = qla2x00_write_ram_word(vha, 0x7c00, 0); 1421 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1435 if (rval != QLA_SUCCESS) { 1422 if (rval != QLA_SUCCESS) {
1436 DEBUG2(qla_printk(KERN_WARNING, ha, 1423 ql_log(ql_log_warn, vha, 0x006d,
1437 "Sync-MPI: Unable to release semaphore.\n")); 1424 "Unable to release semaphore.\n");
1438 } 1425 }
1439 1426
1440done: 1427done:
@@ -1479,14 +1466,14 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1479 /* Load firmware sequences */ 1466 /* Load firmware sequences */
1480 rval = ha->isp_ops->load_risc(vha, &srisc_address); 1467 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1481 if (rval == QLA_SUCCESS) { 1468 if (rval == QLA_SUCCESS) {
1482 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 1469 ql_dbg(ql_dbg_init, vha, 0x00c9,
1483 "code.\n", vha->host_no)); 1470 "Verifying Checksum of loaded RISC code.\n");
1484 1471
1485 rval = qla2x00_verify_checksum(vha, srisc_address); 1472 rval = qla2x00_verify_checksum(vha, srisc_address);
1486 if (rval == QLA_SUCCESS) { 1473 if (rval == QLA_SUCCESS) {
1487 /* Start firmware execution. */ 1474 /* Start firmware execution. */
1488 DEBUG(printk("scsi(%ld): Checksum OK, start " 1475 ql_dbg(ql_dbg_init, vha, 0x00ca,
1489 "firmware.\n", vha->host_no)); 1476 "Starting firmware.\n");
1490 1477
1491 rval = qla2x00_execute_fw(vha, srisc_address); 1478 rval = qla2x00_execute_fw(vha, srisc_address);
1492 /* Retrieve firmware information. */ 1479 /* Retrieve firmware information. */
@@ -1522,9 +1509,9 @@ enable_82xx_npiv:
1522 } 1509 }
1523 } 1510 }
1524 } else { 1511 } else {
1525 DEBUG2(printk(KERN_INFO 1512 ql_log(ql_log_fatal, vha, 0x00cd,
1526 "scsi(%ld): ISP Firmware failed checksum.\n", 1513 "ISP Firmware failed checksum.\n");
1527 vha->host_no)); 1514 goto failed;
1528 } 1515 }
1529 } 1516 }
1530 1517
@@ -1549,7 +1536,7 @@ enable_82xx_npiv:
1549 ha->flags.fac_supported = 1; 1536 ha->flags.fac_supported = 1;
1550 ha->fdt_block_size = size << 2; 1537 ha->fdt_block_size = size << 2;
1551 } else { 1538 } else {
1552 qla_printk(KERN_ERR, ha, 1539 ql_log(ql_log_warn, vha, 0x00ce,
1553 "Unsupported FAC firmware (%d.%02d.%02d).\n", 1540 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1554 ha->fw_major_version, ha->fw_minor_version, 1541 ha->fw_major_version, ha->fw_minor_version,
1555 ha->fw_subminor_version); 1542 ha->fw_subminor_version);
@@ -1557,8 +1544,8 @@ enable_82xx_npiv:
1557 } 1544 }
1558failed: 1545failed:
1559 if (rval) { 1546 if (rval) {
1560 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1547 ql_log(ql_log_fatal, vha, 0x00cf,
1561 vha->host_no)); 1548 "Setup chip ****FAILED****.\n");
1562 } 1549 }
1563 1550
1564 return (rval); 1551 return (rval);
@@ -1608,10 +1595,11 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha)
1608 return; 1595 return;
1609 1596
1610 /* Serial Link options. */ 1597 /* Serial Link options. */
1611 DEBUG3(printk("scsi(%ld): Serial link options:\n", 1598 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
1612 vha->host_no)); 1599 "Serial link options.\n");
1613 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, 1600 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
1614 sizeof(ha->fw_seriallink_options))); 1601 (uint8_t *)&ha->fw_seriallink_options,
1602 sizeof(ha->fw_seriallink_options));
1615 1603
1616 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1604 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1617 if (ha->fw_seriallink_options[3] & BIT_2) { 1605 if (ha->fw_seriallink_options[3] & BIT_2) {
@@ -1688,7 +1676,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
1688 le16_to_cpu(ha->fw_seriallink_options24[2]), 1676 le16_to_cpu(ha->fw_seriallink_options24[2]),
1689 le16_to_cpu(ha->fw_seriallink_options24[3])); 1677 le16_to_cpu(ha->fw_seriallink_options24[3]));
1690 if (rval != QLA_SUCCESS) { 1678 if (rval != QLA_SUCCESS) {
1691 qla_printk(KERN_WARNING, ha, 1679 ql_log(ql_log_warn, vha, 0x0104,
1692 "Unable to update Serial Link options (%x).\n", rval); 1680 "Unable to update Serial Link options (%x).\n", rval);
1693 } 1681 }
1694} 1682}
@@ -1746,8 +1734,9 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1746 icb->rid = __constant_cpu_to_le16(rid); 1734 icb->rid = __constant_cpu_to_le16(rid);
1747 if (ha->flags.msix_enabled) { 1735 if (ha->flags.msix_enabled) {
1748 msix = &ha->msix_entries[1]; 1736 msix = &ha->msix_entries[1];
1749 DEBUG2_17(printk(KERN_INFO 1737 ql_dbg(ql_dbg_init, vha, 0x00fd,
1750 "Registering vector 0x%x for base que\n", msix->entry)); 1738 "Registering vector 0x%x for base que.\n",
1739 msix->entry);
1751 icb->msix = cpu_to_le16(msix->entry); 1740 icb->msix = cpu_to_le16(msix->entry);
1752 } 1741 }
1753 /* Use alternate PCI bus number */ 1742 /* Use alternate PCI bus number */
@@ -1764,8 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1764 icb->firmware_options_2 &= 1753 icb->firmware_options_2 &=
1765 __constant_cpu_to_le32(~BIT_22); 1754 __constant_cpu_to_le32(~BIT_22);
1766 ha->flags.disable_msix_handshake = 1; 1755 ha->flags.disable_msix_handshake = 1;
1767 qla_printk(KERN_INFO, ha, 1756 ql_dbg(ql_dbg_init, vha, 0x00fe,
1768 "MSIX Handshake Disable Mode turned on\n"); 1757 "MSIX Handshake Disable Mode turned on.\n");
1769 } else { 1758 } else {
1770 icb->firmware_options_2 |= 1759 icb->firmware_options_2 |=
1771 __constant_cpu_to_le32(BIT_22); 1760 __constant_cpu_to_le32(BIT_22);
@@ -1850,7 +1839,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1850 /* Update any ISP specific firmware options before initialization. */ 1839 /* Update any ISP specific firmware options before initialization. */
1851 ha->isp_ops->update_fw_options(vha); 1840 ha->isp_ops->update_fw_options(vha);
1852 1841
1853 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no)); 1842 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
1854 1843
1855 if (ha->flags.npiv_supported) { 1844 if (ha->flags.npiv_supported) {
1856 if (ha->operating_mode == LOOP) 1845 if (ha->operating_mode == LOOP)
@@ -1866,11 +1855,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1866 1855
1867 rval = qla2x00_init_firmware(vha, ha->init_cb_size); 1856 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1868 if (rval) { 1857 if (rval) {
1869 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1858 ql_log(ql_log_fatal, vha, 0x00d2,
1870 vha->host_no)); 1859 "Init Firmware **** FAILED ****.\n");
1871 } else { 1860 } else {
1872 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", 1861 ql_dbg(ql_dbg_init, vha, 0x00d3,
1873 vha->host_no)); 1862 "Init Firmware -- success.\n");
1874 } 1863 }
1875 1864
1876 return (rval); 1865 return (rval);
@@ -1913,10 +1902,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1913 1902
1914 /* Wait for ISP to finish LIP */ 1903 /* Wait for ISP to finish LIP */
1915 if (!vha->flags.init_done) 1904 if (!vha->flags.init_done)
1916 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); 1905 ql_log(ql_log_info, vha, 0x801e,
1917 1906 "Waiting for LIP to complete.\n");
1918 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1919 vha->host_no));
1920 1907
1921 do { 1908 do {
1922 rval = qla2x00_get_firmware_state(vha, state); 1909 rval = qla2x00_get_firmware_state(vha, state);
@@ -1925,30 +1912,35 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1925 vha->device_flags &= ~DFLG_NO_CABLE; 1912 vha->device_flags &= ~DFLG_NO_CABLE;
1926 } 1913 }
1927 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1914 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1928 DEBUG16(printk("scsi(%ld): fw_state=%x " 1915 ql_dbg(ql_dbg_taskm, vha, 0x801f,
1929 "84xx=%x.\n", vha->host_no, state[0], 1916 "fw_state=%x 84xx=%x.\n", state[0],
1930 state[2])); 1917 state[2]);
1931 if ((state[2] & FSTATE_LOGGED_IN) && 1918 if ((state[2] & FSTATE_LOGGED_IN) &&
1932 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1919 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1933 DEBUG16(printk("scsi(%ld): Sending " 1920 ql_dbg(ql_dbg_taskm, vha, 0x8028,
1934 "verify iocb.\n", vha->host_no)); 1921 "Sending verify iocb.\n");
1935 1922
1936 cs84xx_time = jiffies; 1923 cs84xx_time = jiffies;
1937 rval = qla84xx_init_chip(vha); 1924 rval = qla84xx_init_chip(vha);
1938 if (rval != QLA_SUCCESS) 1925 if (rval != QLA_SUCCESS) {
1926 ql_log(ql_log_warn,
1927 vha, 0x8043,
1928 "Init chip failed.\n");
1939 break; 1929 break;
1930 }
1940 1931
1941 /* Add time taken to initialize. */ 1932 /* Add time taken to initialize. */
1942 cs84xx_time = jiffies - cs84xx_time; 1933 cs84xx_time = jiffies - cs84xx_time;
1943 wtime += cs84xx_time; 1934 wtime += cs84xx_time;
1944 mtime += cs84xx_time; 1935 mtime += cs84xx_time;
1945 DEBUG16(printk("scsi(%ld): Increasing " 1936 ql_dbg(ql_dbg_taskm, vha, 0x8042,
1946 "wait time by %ld. New time %ld\n", 1937 "Increasing wait time by %ld. "
1947 vha->host_no, cs84xx_time, wtime)); 1938 "New time %ld.\n", cs84xx_time,
1939 wtime);
1948 } 1940 }
1949 } else if (state[0] == FSTATE_READY) { 1941 } else if (state[0] == FSTATE_READY) {
1950 DEBUG(printk("scsi(%ld): F/W Ready - OK \n", 1942 ql_dbg(ql_dbg_taskm, vha, 0x8037,
1951 vha->host_no)); 1943 "F/W Ready - OK.\n");
1952 1944
1953 qla2x00_get_retry_cnt(vha, &ha->retry_count, 1945 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1954 &ha->login_timeout, &ha->r_a_tov); 1946 &ha->login_timeout, &ha->r_a_tov);
@@ -1965,7 +1957,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1965 * other than Wait for Login. 1957 * other than Wait for Login.
1966 */ 1958 */
1967 if (time_after_eq(jiffies, mtime)) { 1959 if (time_after_eq(jiffies, mtime)) {
1968 qla_printk(KERN_INFO, ha, 1960 ql_log(ql_log_info, vha, 0x8038,
1969 "Cable is unplugged...\n"); 1961 "Cable is unplugged...\n");
1970 1962
1971 vha->device_flags |= DFLG_NO_CABLE; 1963 vha->device_flags |= DFLG_NO_CABLE;
@@ -1985,17 +1977,17 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1985 /* Delay for a while */ 1977 /* Delay for a while */
1986 msleep(500); 1978 msleep(500);
1987 1979
1988 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1980 ql_dbg(ql_dbg_taskm, vha, 0x8039,
1989 vha->host_no, state[0], jiffies)); 1981 "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1990 } while (1); 1982 } while (1);
1991 1983
1992 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", 1984 ql_dbg(ql_dbg_taskm, vha, 0x803a,
1993 	    vha->host_no, state[0], state[1], state[2], state[3], state[4], 1985 	    "fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", state[0],
1994 jiffies)); 1986 state[1], state[2], state[3], state[4], jiffies);
1995 1987
1996 if (rval) { 1988 if (rval) {
1997 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1989 ql_log(ql_log_warn, vha, 0x803b,
1998 vha->host_no)); 1990 "Firmware ready **** FAILED ****.\n");
1999 } 1991 }
2000 1992
2001 return (rval); 1993 return (rval);
@@ -2034,19 +2026,19 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2034 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || 2026 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2035 IS_QLA8XXX_TYPE(ha) || 2027 IS_QLA8XXX_TYPE(ha) ||
2036 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 2028 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2037 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 2029 ql_dbg(ql_dbg_disc, vha, 0x2008,
2038 __func__, vha->host_no)); 2030 "Loop is in a transition state.\n");
2039 } else { 2031 } else {
2040 qla_printk(KERN_WARNING, ha, 2032 ql_log(ql_log_warn, vha, 0x2009,
2041 "ERROR -- Unable to get host loop ID.\n"); 2033 "Unable to get host loop ID.\n");
2042 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2034 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2043 } 2035 }
2044 return (rval); 2036 return (rval);
2045 } 2037 }
2046 2038
2047 if (topo == 4) { 2039 if (topo == 4) {
2048 qla_printk(KERN_INFO, ha, 2040 ql_log(ql_log_info, vha, 0x200a,
2049 "Cannot get topology - retrying.\n"); 2041 "Cannot get topology - retrying.\n");
2050 return (QLA_FUNCTION_FAILED); 2042 return (QLA_FUNCTION_FAILED);
2051 } 2043 }
2052 2044
@@ -2059,31 +2051,27 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2059 2051
2060 switch (topo) { 2052 switch (topo) {
2061 case 0: 2053 case 0:
2062 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", 2054 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
2063 vha->host_no));
2064 ha->current_topology = ISP_CFG_NL; 2055 ha->current_topology = ISP_CFG_NL;
2065 strcpy(connect_type, "(Loop)"); 2056 strcpy(connect_type, "(Loop)");
2066 break; 2057 break;
2067 2058
2068 case 1: 2059 case 1:
2069 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 2060 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2070 vha->host_no));
2071 ha->switch_cap = sw_cap; 2061 ha->switch_cap = sw_cap;
2072 ha->current_topology = ISP_CFG_FL; 2062 ha->current_topology = ISP_CFG_FL;
2073 strcpy(connect_type, "(FL_Port)"); 2063 strcpy(connect_type, "(FL_Port)");
2074 break; 2064 break;
2075 2065
2076 case 2: 2066 case 2:
2077 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", 2067 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
2078 vha->host_no));
2079 ha->operating_mode = P2P; 2068 ha->operating_mode = P2P;
2080 ha->current_topology = ISP_CFG_N; 2069 ha->current_topology = ISP_CFG_N;
2081 strcpy(connect_type, "(N_Port-to-N_Port)"); 2070 strcpy(connect_type, "(N_Port-to-N_Port)");
2082 break; 2071 break;
2083 2072
2084 case 3: 2073 case 3:
2085 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 2074 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2086 vha->host_no));
2087 ha->switch_cap = sw_cap; 2075 ha->switch_cap = sw_cap;
2088 ha->operating_mode = P2P; 2076 ha->operating_mode = P2P;
2089 ha->current_topology = ISP_CFG_F; 2077 ha->current_topology = ISP_CFG_F;
@@ -2091,9 +2079,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2091 break; 2079 break;
2092 2080
2093 default: 2081 default:
2094 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " 2082 ql_dbg(ql_dbg_disc, vha, 0x200f,
2095 "Using NL.\n", 2083 "HBA in unknown topology %x, using NL.\n", topo);
2096 vha->host_no, topo));
2097 ha->current_topology = ISP_CFG_NL; 2084 ha->current_topology = ISP_CFG_NL;
2098 strcpy(connect_type, "(Loop)"); 2085 strcpy(connect_type, "(Loop)");
2099 break; 2086 break;
@@ -2106,14 +2093,16 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2106 vha->d_id.b.al_pa = al_pa; 2093 vha->d_id.b.al_pa = al_pa;
2107 2094
2108 if (!vha->flags.init_done) 2095 if (!vha->flags.init_done)
2109 qla_printk(KERN_INFO, ha, 2096 ql_log(ql_log_info, vha, 0x2010,
2110 "Topology - %s, Host Loop address 0x%x\n", 2097 "Topology - %s, Host Loop address 0x%x.\n",
2111 connect_type, vha->loop_id); 2098 connect_type, vha->loop_id);
2112 2099
2113 if (rval) { 2100 if (rval) {
2114 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no)); 2101 ql_log(ql_log_warn, vha, 0x2011,
2102 "%s FAILED\n", __func__);
2115 } else { 2103 } else {
2116 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no)); 2104 ql_dbg(ql_dbg_disc, vha, 0x2012,
2105 "%s success\n", __func__);
2117 } 2106 }
2118 2107
2119 return(rval); 2108 return(rval);
@@ -2227,18 +2216,22 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2227 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 2216 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2228 chksum += *ptr++; 2217 chksum += *ptr++;
2229 2218
2230 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 2219 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
2231 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 2220 "Contents of NVRAM.\n");
2221 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
2222 (uint8_t *)nv, ha->nvram_size);
2232 2223
2233 /* Bad NVRAM data, set defaults parameters. */ 2224 /* Bad NVRAM data, set defaults parameters. */
2234 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || 2225 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2235 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { 2226 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2236 /* Reset NVRAM data. */ 2227 /* Reset NVRAM data. */
2237 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 2228 ql_log(ql_log_warn, vha, 0x0064,
2238 		    "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 2229 		    "Inconsistent NVRAM "
2239 nv->nvram_version); 2230 "detected: checksum=0x%x id=%c version=0x%x.\n",
2240 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 2231 chksum, nv->id[0], nv->nvram_version);
2241 "invalid -- WWPN) defaults.\n"); 2232 ql_log(ql_log_warn, vha, 0x0065,
2233 "Falling back to "
2234 "functioning (yet invalid -- WWPN) defaults.\n");
2242 2235
2243 /* 2236 /*
2244 * Set default initialization control block. 2237 * Set default initialization control block.
@@ -2493,10 +2486,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2493 if (ha->zio_mode != QLA_ZIO_DISABLED) { 2486 if (ha->zio_mode != QLA_ZIO_DISABLED) {
2494 ha->zio_mode = QLA_ZIO_MODE_6; 2487 ha->zio_mode = QLA_ZIO_MODE_6;
2495 2488
2496 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 2489 ql_log(ql_log_info, vha, 0x0068,
2497 "delay (%d us).\n", vha->host_no, ha->zio_mode,
2498 ha->zio_timer * 100));
2499 qla_printk(KERN_INFO, ha,
2500 "ZIO mode %d enabled; timer delay (%d us).\n", 2490 "ZIO mode %d enabled; timer delay (%d us).\n",
2501 ha->zio_mode, ha->zio_timer * 100); 2491 ha->zio_mode, ha->zio_timer * 100);
2502 2492
@@ -2507,8 +2497,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2507 } 2497 }
2508 2498
2509 if (rval) { 2499 if (rval) {
2510 DEBUG2_3(printk(KERN_WARNING 2500 ql_log(ql_log_warn, vha, 0x0069,
2511 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 2501 "NVRAM configuration failed.\n");
2512 } 2502 }
2513 return (rval); 2503 return (rval);
2514} 2504}
@@ -2579,15 +2569,15 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2579 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { 2569 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2580 rval = qla2x00_configure_hba(vha); 2570 rval = qla2x00_configure_hba(vha);
2581 if (rval != QLA_SUCCESS) { 2571 if (rval != QLA_SUCCESS) {
2582 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 2572 ql_dbg(ql_dbg_disc, vha, 0x2013,
2583 vha->host_no)); 2573 "Unable to configure HBA.\n");
2584 return (rval); 2574 return (rval);
2585 } 2575 }
2586 } 2576 }
2587 2577
2588 save_flags = flags = vha->dpc_flags; 2578 save_flags = flags = vha->dpc_flags;
2589 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 2579 ql_dbg(ql_dbg_disc, vha, 0x2014,
2590 vha->host_no, flags)); 2580 "Configure loop -- dpc flags = 0x%lx.\n", flags);
2591 2581
2592 /* 2582 /*
2593 * If we have both an RSCN and PORT UPDATE pending then handle them 2583 * If we have both an RSCN and PORT UPDATE pending then handle them
@@ -2624,15 +2614,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2624 } 2614 }
2625 2615
2626 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2616 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2627 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2617 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2618 ql_dbg(ql_dbg_disc, vha, 0x2015,
2619 "Loop resync needed, failing.\n");
2628 rval = QLA_FUNCTION_FAILED; 2620 rval = QLA_FUNCTION_FAILED;
2621 }
2629 else 2622 else
2630 rval = qla2x00_configure_local_loop(vha); 2623 rval = qla2x00_configure_local_loop(vha);
2631 } 2624 }
2632 2625
2633 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2626 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2634 if (LOOP_TRANSITION(vha)) 2627 if (LOOP_TRANSITION(vha)) {
2628 ql_dbg(ql_dbg_disc, vha, 0x201e,
2629 "Needs RSCN update and loop transition.\n");
2635 rval = QLA_FUNCTION_FAILED; 2630 rval = QLA_FUNCTION_FAILED;
2631 }
2636 else 2632 else
2637 rval = qla2x00_configure_fabric(vha); 2633 rval = qla2x00_configure_fabric(vha);
2638 } 2634 }
@@ -2643,16 +2639,17 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2643 rval = QLA_FUNCTION_FAILED; 2639 rval = QLA_FUNCTION_FAILED;
2644 } else { 2640 } else {
2645 atomic_set(&vha->loop_state, LOOP_READY); 2641 atomic_set(&vha->loop_state, LOOP_READY);
2646 2642 ql_dbg(ql_dbg_disc, vha, 0x2069,
2647 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no)); 2643 "LOOP READY.\n");
2648 } 2644 }
2649 } 2645 }
2650 2646
2651 if (rval) { 2647 if (rval) {
2652 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2648 ql_dbg(ql_dbg_disc, vha, 0x206a,
2653 __func__, vha->host_no)); 2649 "%s *** FAILED ***.\n", __func__);
2654 } else { 2650 } else {
2655 DEBUG3(printk("%s: exiting normally\n", __func__)); 2651 ql_dbg(ql_dbg_disc, vha, 0x206b,
2652 "%s: exiting normally.\n", __func__);
2656 } 2653 }
2657 2654
2658 /* Restore state if a resync event occurred during processing */ 2655 /* Restore state if a resync event occurred during processing */
@@ -2700,8 +2697,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2700 new_fcport = NULL; 2697 new_fcport = NULL;
2701 entries = MAX_FIBRE_DEVICES; 2698 entries = MAX_FIBRE_DEVICES;
2702 2699
2703 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no)); 2700 ql_dbg(ql_dbg_disc, vha, 0x2016,
2704 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL)); 2701 "Getting FCAL position map.\n");
2702 if (ql2xextended_error_logging & ql_dbg_disc)
2703 qla2x00_get_fcal_position_map(vha, NULL);
2705 2704
2706 /* Get list of logged in devices. */ 2705 /* Get list of logged in devices. */
2707 memset(ha->gid_list, 0, GID_LIST_SIZE); 2706 memset(ha->gid_list, 0, GID_LIST_SIZE);
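The hunk just above is more than a message rewrite: the old DEBUG3() wrapper around qla2x00_get_fcal_position_map() compiled the call out entirely unless debugging was built in, whereas the new code issues it at run time whenever the discovery bit is set in the ql2xextended_error_logging module parameter. A sketch of that gating pattern as it appears in the new side of the hunk (the parameter and mask names are taken from the hunk itself; their definitions are assumed to live in the driver's debug header):

	/* Fetch the FCAL position map only when discovery debugging has been
	 * enabled via the ql2xextended_error_logging module parameter. */
	if (ql2xextended_error_logging & ql_dbg_disc)
		qla2x00_get_fcal_position_map(vha, NULL);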
@@ -2710,14 +2709,17 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2710 if (rval != QLA_SUCCESS) 2709 if (rval != QLA_SUCCESS)
2711 goto cleanup_allocation; 2710 goto cleanup_allocation;
2712 2711
2713 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2712 ql_dbg(ql_dbg_disc, vha, 0x2017,
2714 vha->host_no, entries)); 2713 "Entries in ID list (%d).\n", entries);
2715 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2714 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
2716 entries * sizeof(struct gid_list_info))); 2715 (uint8_t *)ha->gid_list,
2716 entries * sizeof(struct gid_list_info));
2717 2717
2718 /* Allocate temporary fcport for any new fcports discovered. */ 2718 /* Allocate temporary fcport for any new fcports discovered. */
2719 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2719 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2720 if (new_fcport == NULL) { 2720 if (new_fcport == NULL) {
2721 ql_log(ql_log_warn, vha, 0x2018,
2722 "Memory allocation failed for fcport.\n");
2721 rval = QLA_MEMORY_ALLOC_FAILED; 2723 rval = QLA_MEMORY_ALLOC_FAILED;
2722 goto cleanup_allocation; 2724 goto cleanup_allocation;
2723 } 2725 }
@@ -2731,9 +2733,9 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2731 fcport->port_type != FCT_BROADCAST && 2733 fcport->port_type != FCT_BROADCAST &&
2732 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2734 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2733 2735
2734 DEBUG(printk("scsi(%ld): Marking port lost, " 2736 ql_dbg(ql_dbg_disc, vha, 0x2019,
2735 "loop_id=0x%04x\n", 2737 "Marking port lost loop_id=0x%04x.\n",
2736 vha->host_no, fcport->loop_id)); 2738 fcport->loop_id);
2737 2739
2738 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2740 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2739 } 2741 }
@@ -2774,12 +2776,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2774 new_fcport->vp_idx = vha->vp_idx; 2776 new_fcport->vp_idx = vha->vp_idx;
2775 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2777 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2776 if (rval2 != QLA_SUCCESS) { 2778 if (rval2 != QLA_SUCCESS) {
2777 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2779 ql_dbg(ql_dbg_disc, vha, 0x201a,
2778 "information -- get_port_database=%x, " 2780 "Failed to retrieve fcport information "
2779 "loop_id=0x%04x\n", 2781 "-- get_port_database=%x, loop_id=0x%04x.\n",
2780 vha->host_no, rval2, new_fcport->loop_id)); 2782 rval2, new_fcport->loop_id);
2781 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2783 ql_dbg(ql_dbg_disc, vha, 0x201b,
2782 vha->host_no)); 2784 "Scheduling resync.\n");
2783 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2785 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2784 continue; 2786 continue;
2785 } 2787 }
@@ -2815,6 +2817,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2815 fcport = new_fcport; 2817 fcport = new_fcport;
2816 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2818 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2817 if (new_fcport == NULL) { 2819 if (new_fcport == NULL) {
2820 ql_log(ql_log_warn, vha, 0x201c,
2821 "Failed to allocate memory for fcport.\n");
2818 rval = QLA_MEMORY_ALLOC_FAILED; 2822 rval = QLA_MEMORY_ALLOC_FAILED;
2819 goto cleanup_allocation; 2823 goto cleanup_allocation;
2820 } 2824 }
@@ -2833,8 +2837,8 @@ cleanup_allocation:
2833 kfree(new_fcport); 2837 kfree(new_fcport);
2834 2838
2835 if (rval != QLA_SUCCESS) { 2839 if (rval != QLA_SUCCESS) {
2836 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2840 ql_dbg(ql_dbg_disc, vha, 0x201d,
2837 "rval=%x\n", vha->host_no, rval)); 2841 "Configure local loop error exit: rval=%x.\n", rval);
2838 } 2842 }
2839 2843
2840 return (rval); 2844 return (rval);
@@ -2863,27 +2867,27 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2863 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 2867 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2864 mb); 2868 mb);
2865 if (rval != QLA_SUCCESS) { 2869 if (rval != QLA_SUCCESS) {
2866 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2870 ql_dbg(ql_dbg_disc, vha, 0x2004,
2867 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2871 "Unable to adjust iIDMA "
2868 vha->host_no, fcport->port_name[0], fcport->port_name[1], 2872 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
2873 "%04x.\n", fcport->port_name[0], fcport->port_name[1],
2869 fcport->port_name[2], fcport->port_name[3], 2874 fcport->port_name[2], fcport->port_name[3],
2870 fcport->port_name[4], fcport->port_name[5], 2875 fcport->port_name[4], fcport->port_name[5],
2871 fcport->port_name[6], fcport->port_name[7], rval, 2876 fcport->port_name[6], fcport->port_name[7], rval,
2872 fcport->fp_speed, mb[0], mb[1])); 2877 fcport->fp_speed, mb[0], mb[1]);
2873 } else { 2878 } else {
2874 link_speed = link_speeds[LS_UNKNOWN]; 2879 link_speed = link_speeds[LS_UNKNOWN];
2875 if (fcport->fp_speed < 5) 2880 if (fcport->fp_speed < 5)
2876 link_speed = link_speeds[fcport->fp_speed]; 2881 link_speed = link_speeds[fcport->fp_speed];
2877 else if (fcport->fp_speed == 0x13) 2882 else if (fcport->fp_speed == 0x13)
2878 link_speed = link_speeds[5]; 2883 link_speed = link_speeds[5];
2879 DEBUG2(qla_printk(KERN_INFO, ha, 2884 ql_dbg(ql_dbg_disc, vha, 0x2005,
2880 "iIDMA adjusted to %s GB/s on " 2885 "iIDMA adjusted to %s GB/s "
2881 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2886 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
2882 link_speed, fcport->port_name[0], 2887 fcport->port_name[0], fcport->port_name[1],
2883 fcport->port_name[1], fcport->port_name[2], 2888 fcport->port_name[2], fcport->port_name[3],
2884 fcport->port_name[3], fcport->port_name[4], 2889 fcport->port_name[4], fcport->port_name[5],
2885 fcport->port_name[5], fcport->port_name[6], 2890 fcport->port_name[6], fcport->port_name[7]);
2886 fcport->port_name[7]));
2887 } 2891 }
2888} 2892}
2889 2893
@@ -2892,7 +2896,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2892{ 2896{
2893 struct fc_rport_identifiers rport_ids; 2897 struct fc_rport_identifiers rport_ids;
2894 struct fc_rport *rport; 2898 struct fc_rport *rport;
2895 struct qla_hw_data *ha = vha->hw;
2896 unsigned long flags; 2899 unsigned long flags;
2897 2900
2898 qla2x00_rport_del(fcport); 2901 qla2x00_rport_del(fcport);
@@ -2904,8 +2907,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2904 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2907 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2905 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); 2908 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2906 if (!rport) { 2909 if (!rport) {
2907 qla_printk(KERN_WARNING, ha, 2910 ql_log(ql_log_warn, vha, 0x2006,
2908 "Unable to allocate fc remote port!\n"); 2911 "Unable to allocate fc remote port.\n");
2909 return; 2912 return;
2910 } 2913 }
2911 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2914 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -2980,8 +2983,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2980 loop_id = SNS_FL_PORT; 2983 loop_id = SNS_FL_PORT;
2981 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); 2984 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2982 if (rval != QLA_SUCCESS) { 2985 if (rval != QLA_SUCCESS) {
2983 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2986 ql_dbg(ql_dbg_disc, vha, 0x201f,
2984 "Port\n", vha->host_no)); 2987 "MBX_GET_PORT_NAME failed, No FL Port.\n");
2985 2988
2986 vha->device_flags &= ~SWITCH_FOUND; 2989 vha->device_flags &= ~SWITCH_FOUND;
2987 return (QLA_SUCCESS); 2990 return (QLA_SUCCESS);
@@ -3008,32 +3011,32 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3008 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 3011 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
3009 0xfc, mb, BIT_1 | BIT_0); 3012 0xfc, mb, BIT_1 | BIT_0);
3010 if (mb[0] != MBS_COMMAND_COMPLETE) { 3013 if (mb[0] != MBS_COMMAND_COMPLETE) {
3011 DEBUG2(qla_printk(KERN_INFO, ha, 3014 ql_dbg(ql_dbg_disc, vha, 0x2042,
3012 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 3015 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
3013 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id, 3016 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
3014 mb[0], mb[1], mb[2], mb[6], mb[7])); 3017 mb[2], mb[6], mb[7]);
3015 return (QLA_SUCCESS); 3018 return (QLA_SUCCESS);
3016 } 3019 }
3017 3020
3018 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { 3021 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
3019 if (qla2x00_rft_id(vha)) { 3022 if (qla2x00_rft_id(vha)) {
3020 /* EMPTY */ 3023 /* EMPTY */
3021 DEBUG2(printk("scsi(%ld): Register FC-4 " 3024 ql_dbg(ql_dbg_disc, vha, 0x2045,
3022 "TYPE failed.\n", vha->host_no)); 3025 "Register FC-4 TYPE failed.\n");
3023 } 3026 }
3024 if (qla2x00_rff_id(vha)) { 3027 if (qla2x00_rff_id(vha)) {
3025 /* EMPTY */ 3028 /* EMPTY */
3026 DEBUG2(printk("scsi(%ld): Register FC-4 " 3029 ql_dbg(ql_dbg_disc, vha, 0x2049,
3027 "Features failed.\n", vha->host_no)); 3030 "Register FC-4 Features failed.\n");
3028 } 3031 }
3029 if (qla2x00_rnn_id(vha)) { 3032 if (qla2x00_rnn_id(vha)) {
3030 /* EMPTY */ 3033 /* EMPTY */
3031 DEBUG2(printk("scsi(%ld): Register Node Name " 3034 ql_dbg(ql_dbg_disc, vha, 0x204f,
3032 "failed.\n", vha->host_no)); 3035 "Register Node Name failed.\n");
3033 } else if (qla2x00_rsnn_nn(vha)) { 3036 } else if (qla2x00_rsnn_nn(vha)) {
3034 /* EMPTY */ 3037 /* EMPTY */
3035 DEBUG2(printk("scsi(%ld): Register Symbolic " 3038 ql_dbg(ql_dbg_disc, vha, 0x2053,
3036 "Node Name failed.\n", vha->host_no)); 3039 "Register Symobilic Node Name failed.\n");
3037 } 3040 }
3038 } 3041 }
3039 3042
@@ -3137,8 +3140,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3137 } 3140 }
3138 3141
3139 if (rval) { 3142 if (rval) {
3140 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 3143 ql_dbg(ql_dbg_disc, vha, 0x2068,
3141 "rval=%d\n", vha->host_no, rval)); 3144 "Configure fabric error exit rval=%d.\n", rval);
3142 } 3145 }
3143 3146
3144 return (rval); 3147 return (rval);
@@ -3180,8 +3183,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3180 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); 3183 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3181 if (!swl) { 3184 if (!swl) {
3182 /*EMPTY*/ 3185 /*EMPTY*/
3183 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 3186 ql_dbg(ql_dbg_disc, vha, 0x2054,
3184 "on GA_NXT\n", vha->host_no)); 3187 "GID_PT allocations failed, fallback on GA_NXT.\n");
3185 } else { 3188 } else {
3186 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 3189 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3187 kfree(swl); 3190 kfree(swl);
@@ -3206,6 +3209,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3206 /* Allocate temporary fcport for any new fcports discovered. */ 3209 /* Allocate temporary fcport for any new fcports discovered. */
3207 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3210 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3208 if (new_fcport == NULL) { 3211 if (new_fcport == NULL) {
3212 ql_log(ql_log_warn, vha, 0x205e,
3213 "Failed to allocate memory for fcport.\n");
3209 kfree(swl); 3214 kfree(swl);
3210 return (QLA_MEMORY_ALLOC_FAILED); 3215 return (QLA_MEMORY_ALLOC_FAILED);
3211 } 3216 }
@@ -3252,9 +3257,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3252 /* Send GA_NXT to the switch */ 3257 /* Send GA_NXT to the switch */
3253 rval = qla2x00_ga_nxt(vha, new_fcport); 3258 rval = qla2x00_ga_nxt(vha, new_fcport);
3254 if (rval != QLA_SUCCESS) { 3259 if (rval != QLA_SUCCESS) {
3255 qla_printk(KERN_WARNING, ha, 3260 ql_log(ql_log_warn, vha, 0x2064,
3256 "SNS scan failed -- assuming zero-entry " 3261 "SNS scan failed -- assuming "
3257 "result...\n"); 3262 "zero-entry result.\n");
3258 list_for_each_entry_safe(fcport, fcptemp, 3263 list_for_each_entry_safe(fcport, fcptemp,
3259 new_fcports, list) { 3264 new_fcports, list) {
3260 list_del(&fcport->list); 3265 list_del(&fcport->list);
@@ -3270,9 +3275,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3270 wrap.b24 = new_fcport->d_id.b24; 3275 wrap.b24 = new_fcport->d_id.b24;
3271 first_dev = 0; 3276 first_dev = 0;
3272 } else if (new_fcport->d_id.b24 == wrap.b24) { 3277 } else if (new_fcport->d_id.b24 == wrap.b24) {
3273 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 3278 ql_dbg(ql_dbg_disc, vha, 0x2065,
3274 vha->host_no, new_fcport->d_id.b.domain, 3279 "Device wrap (%02x%02x%02x).\n",
3275 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 3280 new_fcport->d_id.b.domain,
3281 new_fcport->d_id.b.area,
3282 new_fcport->d_id.b.al_pa);
3276 break; 3283 break;
3277 } 3284 }
3278 3285
@@ -3377,6 +3384,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3377 nxt_d_id.b24 = new_fcport->d_id.b24; 3384 nxt_d_id.b24 = new_fcport->d_id.b24;
3378 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3385 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3379 if (new_fcport == NULL) { 3386 if (new_fcport == NULL) {
3387 ql_log(ql_log_warn, vha, 0x2066,
3388 "Memory allocation failed for fcport.\n");
3380 kfree(swl); 3389 kfree(swl);
3381 return (QLA_MEMORY_ALLOC_FAILED); 3390 return (QLA_MEMORY_ALLOC_FAILED);
3382 } 3391 }
@@ -3506,10 +3515,10 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
3506 d_id.b.area = MSB(LSW(rscn_entry)); 3515 d_id.b.area = MSB(LSW(rscn_entry));
3507 d_id.b.al_pa = LSB(LSW(rscn_entry)); 3516 d_id.b.al_pa = LSB(LSW(rscn_entry));
3508 3517
3509 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 3518 ql_dbg(ql_dbg_disc, vha, 0x2020,
3510 "[%02x/%02x%02x%02x].\n", 3519 "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
3511 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain, 3520 vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
3512 d_id.b.area, d_id.b.al_pa)); 3521 d_id.b.al_pa);
3513 3522
3514 vha->rscn_out_ptr++; 3523 vha->rscn_out_ptr++;
3515 if (vha->rscn_out_ptr == MAX_RSCN_COUNT) 3524 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
@@ -3525,17 +3534,17 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
3525 if (rscn_entry != vha->rscn_queue[rscn_out_iter]) 3534 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3526 break; 3535 break;
3527 3536
3528 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 3537 ql_dbg(ql_dbg_disc, vha, 0x2021,
3529 "entry found at [%d].\n", vha->host_no, 3538 "Skipping duplicate RSCN queue entry found at "
3530 rscn_out_iter)); 3539 "[%d].\n", rscn_out_iter);
3531 3540
3532 vha->rscn_out_ptr = rscn_out_iter; 3541 vha->rscn_out_ptr = rscn_out_iter;
3533 } 3542 }
3534 3543
3535 /* Queue overflow, set switch default case. */ 3544 /* Queue overflow, set switch default case. */
3536 if (vha->flags.rscn_queue_overflow) { 3545 if (vha->flags.rscn_queue_overflow) {
3537 DEBUG(printk("scsi(%ld): device_resync: rscn " 3546 ql_dbg(ql_dbg_disc, vha, 0x2022,
3538 "overflow.\n", vha->host_no)); 3547 "device_resync: rscn overflow.\n");
3539 3548
3540 format = 3; 3549 format = 3;
3541 vha->flags.rscn_queue_overflow = 0; 3550 vha->flags.rscn_queue_overflow = 0;
@@ -3664,10 +3673,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3664 tmp_loopid = 0; 3673 tmp_loopid = 0;
3665 3674
3666 for (;;) { 3675 for (;;) {
3667 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3676 ql_dbg(ql_dbg_disc, vha, 0x2000,
3668 "for port %02x%02x%02x.\n", 3677 "Trying Fabric Login w/loop id 0x%04x for port "
3669 vha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3678 "%02x%02x%02x.\n",
3670 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3679 fcport->loop_id, fcport->d_id.b.domain,
3680 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3671 3681
3672 /* Login fcport on switch. */ 3682 /* Login fcport on switch. */
3673 ha->isp_ops->fabric_login(vha, fcport->loop_id, 3683 ha->isp_ops->fabric_login(vha, fcport->loop_id,
@@ -3685,10 +3695,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3685 tmp_loopid = fcport->loop_id; 3695 tmp_loopid = fcport->loop_id;
3686 fcport->loop_id = mb[1]; 3696 fcport->loop_id = mb[1];
3687 3697
3688 DEBUG(printk("Fabric Login: port in use - next " 3698 ql_dbg(ql_dbg_disc, vha, 0x2001,
3689 "loop id=0x%04x, port Id=%02x%02x%02x.\n", 3699 "Fabric Login: port in use - next loop "
3700 "id=0x%04x, port id= %02x%02x%02x.\n",
3690 fcport->loop_id, fcport->d_id.b.domain, 3701 fcport->loop_id, fcport->d_id.b.domain,
3691 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3702 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3692 3703
3693 } else if (mb[0] == MBS_COMMAND_COMPLETE) { 3704 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3694 /* 3705 /*
@@ -3749,11 +3760,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3749 /* 3760 /*
3750 * unrecoverable / not handled error 3761 * unrecoverable / not handled error
3751 */ 3762 */
3752 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3763 ql_dbg(ql_dbg_disc, vha, 0x2002,
3753 "loop_id=%x jiffies=%lx.\n", 3764 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
3754 __func__, vha->host_no, mb[0], 3765 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
3755 fcport->d_id.b.domain, fcport->d_id.b.area, 3766 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3756 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3767 fcport->loop_id, jiffies);
3757 3768
3758 *next_loopid = fcport->loop_id; 3769 *next_loopid = fcport->loop_id;
3759 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3770 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3857,7 +3868,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3857 return (QLA_FUNCTION_FAILED); 3868 return (QLA_FUNCTION_FAILED);
3858 3869
3859 if (rval) 3870 if (rval)
3860 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3871 ql_dbg(ql_dbg_disc, vha, 0x206c,
3872 "%s *** FAILED ***.\n", __func__);
3861 3873
3862 return (rval); 3874 return (rval);
3863} 3875}
@@ -3934,8 +3946,8 @@ qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
3934 struct qla_hw_data *ha = vha->hw; 3946 struct qla_hw_data *ha = vha->hw;
3935 struct scsi_qla_host *vp; 3947 struct scsi_qla_host *vp;
3936 3948
3937 qla_printk(KERN_INFO, ha, 3949 ql_dbg(ql_dbg_p3p, vha, 0xb002,
3938 "Performing ISP error recovery - ha= %p.\n", ha); 3950 "Performing ISP error recovery - ha=%p.\n", ha);
3939 3951
3940 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 3952 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3941 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3953 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -3969,8 +3981,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3969 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3981 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3970 ha->qla_stats.total_isp_aborts++; 3982 ha->qla_stats.total_isp_aborts++;
3971 3983
3972 qla_printk(KERN_INFO, ha, 3984 ql_log(ql_log_info, vha, 0x00af,
3973 "Performing ISP error recovery - ha= %p.\n", ha); 3985 "Performing ISP error recovery - ha=%p.\n", ha);
3974 3986
3975 /* For ISP82XX, reset_chip is just disabling interrupts. 3987 /* For ISP82XX, reset_chip is just disabling interrupts.
3976 * Driver waits for the completion of the commands. 3988 * Driver waits for the completion of the commands.
@@ -4021,6 +4033,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4021 /* Make sure for ISP 82XX IO DMA is complete */ 4033 /* Make sure for ISP 82XX IO DMA is complete */
4022 if (IS_QLA82XX(ha)) { 4034 if (IS_QLA82XX(ha)) {
4023 qla82xx_chip_reset_cleanup(vha); 4035 qla82xx_chip_reset_cleanup(vha);
4036 ql_log(ql_log_info, vha, 0x00b4,
4037 "Done chip reset cleanup.\n");
4024 4038
4025 /* Done waiting for pending commands. 4039 /* Done waiting for pending commands.
4026 * Reset the online flag. 4040 * Reset the online flag.
@@ -4102,7 +4116,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4102 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 4116 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
4103 &ha->fce_bufs); 4117 &ha->fce_bufs);
4104 if (rval) { 4118 if (rval) {
4105 qla_printk(KERN_WARNING, ha, 4119 ql_log(ql_log_warn, vha, 0x8033,
4106 "Unable to reinitialize FCE " 4120 "Unable to reinitialize FCE "
4107 "(%d).\n", rval); 4121 "(%d).\n", rval);
4108 ha->flags.fce_enabled = 0; 4122 ha->flags.fce_enabled = 0;
@@ -4114,7 +4128,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4114 rval = qla2x00_enable_eft_trace(vha, 4128 rval = qla2x00_enable_eft_trace(vha,
4115 ha->eft_dma, EFT_NUM_BUFFERS); 4129 ha->eft_dma, EFT_NUM_BUFFERS);
4116 if (rval) { 4130 if (rval) {
4117 qla_printk(KERN_WARNING, ha, 4131 ql_log(ql_log_warn, vha, 0x8034,
4118 "Unable to reinitialize EFT " 4132 "Unable to reinitialize EFT "
4119 "(%d).\n", rval); 4133 "(%d).\n", rval);
4120 } 4134 }
@@ -4123,9 +4137,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4123 vha->flags.online = 1; 4137 vha->flags.online = 1;
4124 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 4138 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
4125 if (ha->isp_abort_cnt == 0) { 4139 if (ha->isp_abort_cnt == 0) {
4126 qla_printk(KERN_WARNING, ha, 4140 ql_log(ql_log_fatal, vha, 0x8035,
4127 "ISP error recovery failed - " 4141 "ISP error recover failed - "
4128 "board disabled\n"); 4142 "board disabled.\n");
4129 /* 4143 /*
4130 * The next call disables the board 4144 * The next call disables the board
4131 * completely. 4145 * completely.
@@ -4137,16 +4151,16 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4137 status = 0; 4151 status = 0;
4138 } else { /* schedule another ISP abort */ 4152 } else { /* schedule another ISP abort */
4139 ha->isp_abort_cnt--; 4153 ha->isp_abort_cnt--;
4140 DEBUG(printk("qla%ld: ISP abort - " 4154 ql_dbg(ql_dbg_taskm, vha, 0x8020,
4141 "retry remaining %d\n", 4155 "ISP abort - retry remaining %d.\n",
4142 vha->host_no, ha->isp_abort_cnt)); 4156 ha->isp_abort_cnt);
4143 status = 1; 4157 status = 1;
4144 } 4158 }
4145 } else { 4159 } else {
4146 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 4160 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
4147 DEBUG(printk("qla2x00(%ld): ISP error recovery " 4161 ql_dbg(ql_dbg_taskm, vha, 0x8021,
4148 "- retrying (%d) more times\n", 4162 "ISP error recovery - retrying (%d) "
4149 vha->host_no, ha->isp_abort_cnt)); 4163 "more times.\n", ha->isp_abort_cnt);
4150 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 4164 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4151 status = 1; 4165 status = 1;
4152 } 4166 }
@@ -4155,9 +4169,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4155 } 4169 }
4156 4170
4157 if (!status) { 4171 if (!status) {
4158 DEBUG(printk(KERN_INFO 4172 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
4159 "qla2x00_abort_isp(%ld): succeeded.\n",
4160 vha->host_no));
4161 4173
4162 spin_lock_irqsave(&ha->vport_slock, flags); 4174 spin_lock_irqsave(&ha->vport_slock, flags);
4163 list_for_each_entry(vp, &ha->vp_list, list) { 4175 list_for_each_entry(vp, &ha->vp_list, list) {
@@ -4174,8 +4186,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4174 spin_unlock_irqrestore(&ha->vport_slock, flags); 4186 spin_unlock_irqrestore(&ha->vport_slock, flags);
4175 4187
4176 } else { 4188 } else {
4177 qla_printk(KERN_INFO, ha, 4189 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", __func__);
4178 "qla2x00_abort_isp: **** FAILED ****\n");
4179 } 4190 }
4180 4191
4181 return(status); 4192 return(status);
@@ -4216,8 +4227,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4216 4227
4217 status = qla2x00_fw_ready(vha); 4228 status = qla2x00_fw_ready(vha);
4218 if (!status) { 4229 if (!status) {
4219 DEBUG(printk("%s(): Start configure loop, " 4230 ql_dbg(ql_dbg_taskm, vha, 0x8031,
4220 "status = %d\n", __func__, status)); 4231 "Start configure loop status = %d.\n", status);
4221 4232
4222 /* Issue a marker after FW becomes ready. */ 4233 /* Issue a marker after FW becomes ready. */
4223 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4234 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4239,9 +4250,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4239 if ((vha->device_flags & DFLG_NO_CABLE)) 4250 if ((vha->device_flags & DFLG_NO_CABLE))
4240 status = 0; 4251 status = 0;
4241 4252
4242 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 4253 ql_dbg(ql_dbg_taskm, vha, 0x8032,
4243 __func__, 4254 "Configure loop done, status = 0x%x.\n", status);
4244 status));
4245 } 4255 }
4246 return (status); 4256 return (status);
4247} 4257}
@@ -4261,13 +4271,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4261 rsp->options &= ~BIT_0; 4271 rsp->options &= ~BIT_0;
4262 ret = qla25xx_init_rsp_que(base_vha, rsp); 4272 ret = qla25xx_init_rsp_que(base_vha, rsp);
4263 if (ret != QLA_SUCCESS) 4273 if (ret != QLA_SUCCESS)
4264 DEBUG2_17(printk(KERN_WARNING 4274 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
4265 "%s Rsp que:%d init failed\n", __func__, 4275 "%s Rsp que: %d init failed.\n",
4266 rsp->id)); 4276 __func__, rsp->id);
4267 else 4277 else
4268 DEBUG2_17(printk(KERN_INFO 4278 ql_dbg(ql_dbg_init, base_vha, 0x0100,
4269 "%s Rsp que:%d inited\n", __func__, 4279 "%s Rsp que: %d inited.\n",
4270 rsp->id)); 4280 __func__, rsp->id);
4271 } 4281 }
4272 } 4282 }
4273 for (i = 1; i < ha->max_req_queues; i++) { 4283 for (i = 1; i < ha->max_req_queues; i++) {
@@ -4277,13 +4287,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4277 req->options &= ~BIT_0; 4287 req->options &= ~BIT_0;
4278 ret = qla25xx_init_req_que(base_vha, req); 4288 ret = qla25xx_init_req_que(base_vha, req);
4279 if (ret != QLA_SUCCESS) 4289 if (ret != QLA_SUCCESS)
4280 DEBUG2_17(printk(KERN_WARNING 4290 ql_dbg(ql_dbg_init, base_vha, 0x0101,
4281 "%s Req que:%d init failed\n", __func__, 4291 "%s Req que: %d init failed.\n",
4282 req->id)); 4292 __func__, req->id);
4283 else 4293 else
4284 DEBUG2_17(printk(KERN_WARNING 4294 ql_dbg(ql_dbg_init, base_vha, 0x0102,
4285 "%s Req que:%d inited\n", __func__, 4295 "%s Req que: %d inited.\n",
4286 req->id)); 4296 __func__, req->id);
4287 } 4297 }
4288 } 4298 }
4289 return ret; 4299 return ret;
@@ -4402,19 +4412,22 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4402 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4412 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4403 chksum += le32_to_cpu(*dptr++); 4413 chksum += le32_to_cpu(*dptr++);
4404 4414
4405 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 4415 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
4406 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4416 "Contents of NVRAM\n");
4417 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
4418 (uint8_t *)nv, ha->nvram_size);
4407 4419
4408 /* Bad NVRAM data, set defaults parameters. */ 4420 /* Bad NVRAM data, set defaults parameters. */
4409 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 4421 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4410 || nv->id[3] != ' ' || 4422 || nv->id[3] != ' ' ||
4411 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 4423 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4412 /* Reset NVRAM data. */ 4424 /* Reset NVRAM data. */
4413 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 4425 ql_log(ql_log_warn, vha, 0x006b,
4414 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 4426 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
4415 le16_to_cpu(nv->nvram_version)); 4427 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4416 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 4428 ql_log(ql_log_warn, vha, 0x006c,
4417 "invalid -- WWPN) defaults.\n"); 4429 "Falling back to functioning (yet invalid -- WWPN) "
4430 "defaults.\n");
4418 4431
4419 /* 4432 /*
4420 * Set default initialization control block. 4433 * Set default initialization control block.
@@ -4592,10 +4605,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4592 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4605 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4593 ha->zio_mode = QLA_ZIO_MODE_6; 4606 ha->zio_mode = QLA_ZIO_MODE_6;
4594 4607
4595 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 4608 ql_log(ql_log_info, vha, 0x006f,
4596 "(%d us).\n", vha->host_no, ha->zio_mode,
4597 ha->zio_timer * 100));
4598 qla_printk(KERN_INFO, ha,
4599 "ZIO mode %d enabled; timer delay (%d us).\n", 4609 "ZIO mode %d enabled; timer delay (%d us).\n",
4600 ha->zio_mode, ha->zio_timer * 100); 4610 ha->zio_mode, ha->zio_timer * 100);
4601 4611
@@ -4606,8 +4616,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4606 } 4616 }
4607 4617
4608 if (rval) { 4618 if (rval) {
4609 DEBUG2_3(printk(KERN_WARNING 4619 ql_log(ql_log_warn, vha, 0x0070,
4610 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 4620 "NVRAM configuration failed.\n");
4611 } 4621 }
4612 return (rval); 4622 return (rval);
4613} 4623}
@@ -4625,8 +4635,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4625 struct qla_hw_data *ha = vha->hw; 4635 struct qla_hw_data *ha = vha->hw;
4626 struct req_que *req = ha->req_q_map[0]; 4636 struct req_que *req = ha->req_q_map[0];
4627 4637
4628 qla_printk(KERN_INFO, ha, 4638 ql_dbg(ql_dbg_init, vha, 0x008b,
4629 "FW: Loading from flash (%x)...\n", faddr); 4639 "Loading firmware from flash (%x).\n", faddr);
4630 4640
4631 rval = QLA_SUCCESS; 4641 rval = QLA_SUCCESS;
4632 4642
@@ -4642,11 +4652,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4642 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4652 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4643 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4653 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4644 dcode[3] == 0)) { 4654 dcode[3] == 0)) {
4645 qla_printk(KERN_WARNING, ha, 4655 ql_log(ql_log_fatal, vha, 0x008c,
4646 "Unable to verify integrity of flash firmware image!\n"); 4656 "Unable to verify the integrity of flash firmware "
4647 qla_printk(KERN_WARNING, ha, 4657 "image.\n");
4648 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4658 ql_log(ql_log_fatal, vha, 0x008d,
4649 dcode[1], dcode[2], dcode[3]); 4659 "Firmware data: %08x %08x %08x %08x.\n",
4660 dcode[0], dcode[1], dcode[2], dcode[3]);
4650 4661
4651 return QLA_FUNCTION_FAILED; 4662 return QLA_FUNCTION_FAILED;
4652 } 4663 }
@@ -4665,9 +4676,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4665 if (dlen > risc_size) 4676 if (dlen > risc_size)
4666 dlen = risc_size; 4677 dlen = risc_size;
4667 4678
4668 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4679 ql_dbg(ql_dbg_init, vha, 0x008e,
4669 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 4680 "Loading risc segment@ risc addr %x "
4670 vha->host_no, risc_addr, dlen, faddr)); 4681 "number of dwords 0x%x offset 0x%x.\n",
4682 risc_addr, dlen, faddr);
4671 4683
4672 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 4684 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
4673 for (i = 0; i < dlen; i++) 4685 for (i = 0; i < dlen; i++)
@@ -4676,12 +4688,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4676 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4688 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4677 dlen); 4689 dlen);
4678 if (rval) { 4690 if (rval) {
4679 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4691 ql_log(ql_log_fatal, vha, 0x008f,
4680 "segment %d of firmware\n", vha->host_no, 4692 "Failed to load segment %d of firmware.\n",
4681 fragment)); 4693 fragment);
4682 qla_printk(KERN_WARNING, ha,
4683 "[ERROR] Failed to load segment %d of "
4684 "firmware\n", fragment);
4685 break; 4694 break;
4686 } 4695 }
4687 4696
@@ -4714,9 +4723,10 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4714 /* Load firmware blob. */ 4723 /* Load firmware blob. */
4715 blob = qla2x00_request_firmware(vha); 4724 blob = qla2x00_request_firmware(vha);
4716 if (!blob) { 4725 if (!blob) {
4717 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4726 ql_log(ql_log_info, vha, 0x0083,
4718 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4727 "Fimware image unavailable.\n");
4719 "from: " QLA_FW_URL ".\n"); 4728 ql_log(ql_log_info, vha, 0x0084,
4729 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
4720 return QLA_FUNCTION_FAILED; 4730 return QLA_FUNCTION_FAILED;
4721 } 4731 }
4722 4732
@@ -4729,8 +4739,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4729 4739
4730 /* Validate firmware image by checking version. */ 4740 /* Validate firmware image by checking version. */
4731 if (blob->fw->size < 8 * sizeof(uint16_t)) { 4741 if (blob->fw->size < 8 * sizeof(uint16_t)) {
4732 qla_printk(KERN_WARNING, ha, 4742 ql_log(ql_log_fatal, vha, 0x0085,
4733 "Unable to verify integrity of firmware image (%Zd)!\n", 4743 "Unable to verify integrity of firmware image (%Zd).\n",
4734 blob->fw->size); 4744 blob->fw->size);
4735 goto fail_fw_integrity; 4745 goto fail_fw_integrity;
4736 } 4746 }
@@ -4739,11 +4749,11 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4739 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && 4749 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
4740 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && 4750 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
4741 wcode[2] == 0 && wcode[3] == 0)) { 4751 wcode[2] == 0 && wcode[3] == 0)) {
4742 qla_printk(KERN_WARNING, ha, 4752 ql_log(ql_log_fatal, vha, 0x0086,
4743 "Unable to verify integrity of firmware image!\n"); 4753 "Unable to verify integrity of firmware image.\n");
4744 qla_printk(KERN_WARNING, ha, 4754 ql_log(ql_log_fatal, vha, 0x0087,
4745 "Firmware data: %04x %04x %04x %04x!\n", wcode[0], 4755 "Firmware data: %04x %04x %04x %04x.\n",
4746 wcode[1], wcode[2], wcode[3]); 4756 wcode[0], wcode[1], wcode[2], wcode[3]);
4747 goto fail_fw_integrity; 4757 goto fail_fw_integrity;
4748 } 4758 }
4749 4759
@@ -4756,9 +4766,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4756 /* Validate firmware image size. */ 4766 /* Validate firmware image size. */
4757 fwclen += risc_size * sizeof(uint16_t); 4767 fwclen += risc_size * sizeof(uint16_t);
4758 if (blob->fw->size < fwclen) { 4768 if (blob->fw->size < fwclen) {
4759 qla_printk(KERN_WARNING, ha, 4769 ql_log(ql_log_fatal, vha, 0x0088,
4760 "Unable to verify integrity of firmware image " 4770 "Unable to verify integrity of firmware image "
4761 "(%Zd)!\n", blob->fw->size); 4771 "(%Zd).\n", blob->fw->size);
4762 goto fail_fw_integrity; 4772 goto fail_fw_integrity;
4763 } 4773 }
4764 4774
@@ -4767,10 +4777,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4767 wlen = (uint16_t)(ha->fw_transfer_size >> 1); 4777 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4768 if (wlen > risc_size) 4778 if (wlen > risc_size)
4769 wlen = risc_size; 4779 wlen = risc_size;
4770 4780 ql_dbg(ql_dbg_init, vha, 0x0089,
4771 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4781 "Loading risc segment@ risc addr %x number of "
4772 "addr %x, number of words 0x%x.\n", vha->host_no, 4782 "words 0x%x.\n", risc_addr, wlen);
4773 risc_addr, wlen));
4774 4783
4775 for (i = 0; i < wlen; i++) 4784 for (i = 0; i < wlen; i++)
4776 wcode[i] = swab16(fwcode[i]); 4785 wcode[i] = swab16(fwcode[i]);
@@ -4778,12 +4787,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4778 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4787 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4779 wlen); 4788 wlen);
4780 if (rval) { 4789 if (rval) {
4781 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4790 ql_log(ql_log_fatal, vha, 0x008a,
4782 "segment %d of firmware\n", vha->host_no, 4791 "Failed to load segment %d of firmware.\n",
4783 fragment)); 4792 fragment);
4784 qla_printk(KERN_WARNING, ha,
4785 "[ERROR] Failed to load segment %d of "
4786 "firmware\n", fragment);
4787 break; 4793 break;
4788 } 4794 }
4789 4795
@@ -4819,15 +4825,17 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4819 /* Load firmware blob. */ 4825 /* Load firmware blob. */
4820 blob = qla2x00_request_firmware(vha); 4826 blob = qla2x00_request_firmware(vha);
4821 if (!blob) { 4827 if (!blob) {
4822 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4828 ql_log(ql_log_warn, vha, 0x0090,
4823 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4829 "Firmware image unavailable.\n");
4824 "from: " QLA_FW_URL ".\n"); 4830 ql_log(ql_log_warn, vha, 0x0091,
4831 "Firmware images can be retrieved from: "
4832 QLA_FW_URL ".\n");
4825 4833
4826 return QLA_FUNCTION_FAILED; 4834 return QLA_FUNCTION_FAILED;
4827 } 4835 }
4828 4836
4829 qla_printk(KERN_INFO, ha, 4837 ql_log(ql_log_info, vha, 0x0092,
4830 "FW: Loading via request-firmware...\n"); 4838 "Loading via request-firmware.\n");
4831 4839
4832 rval = QLA_SUCCESS; 4840 rval = QLA_SUCCESS;
4833 4841
@@ -4839,8 +4847,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4839 4847
4840 /* Validate firmware image by checking version. */ 4848 /* Validate firmware image by checking version. */
4841 if (blob->fw->size < 8 * sizeof(uint32_t)) { 4849 if (blob->fw->size < 8 * sizeof(uint32_t)) {
4842 qla_printk(KERN_WARNING, ha, 4850 ql_log(ql_log_fatal, vha, 0x0093,
4843 "Unable to verify integrity of firmware image (%Zd)!\n", 4851 "Unable to verify integrity of firmware image (%Zd).\n",
4844 blob->fw->size); 4852 blob->fw->size);
4845 goto fail_fw_integrity; 4853 goto fail_fw_integrity;
4846 } 4854 }
@@ -4850,11 +4858,12 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4850 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4858 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4851 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4859 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4852 dcode[3] == 0)) { 4860 dcode[3] == 0)) {
4853 qla_printk(KERN_WARNING, ha, 4861 ql_log(ql_log_fatal, vha, 0x0094,
4854 "Unable to verify integrity of firmware image!\n"); 4862 "Unable to verify integrity of firmware image (%Zd).\n",
4855 qla_printk(KERN_WARNING, ha, 4863 blob->fw->size);
4856 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4864 ql_log(ql_log_fatal, vha, 0x0095,
4857 dcode[1], dcode[2], dcode[3]); 4865 "Firmware data: %08x %08x %08x %08x.\n",
4866 dcode[0], dcode[1], dcode[2], dcode[3]);
4858 goto fail_fw_integrity; 4867 goto fail_fw_integrity;
4859 } 4868 }
4860 4869
@@ -4866,9 +4875,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4866 /* Validate firmware image size. */ 4875 /* Validate firmware image size. */
4867 fwclen += risc_size * sizeof(uint32_t); 4876 fwclen += risc_size * sizeof(uint32_t);
4868 if (blob->fw->size < fwclen) { 4877 if (blob->fw->size < fwclen) {
4869 qla_printk(KERN_WARNING, ha, 4878 ql_log(ql_log_fatal, vha, 0x0096,
4870 "Unable to verify integrity of firmware image " 4879 "Unable to verify integrity of firmware image "
4871 "(%Zd)!\n", blob->fw->size); 4880 "(%Zd).\n", blob->fw->size);
4872 4881
4873 goto fail_fw_integrity; 4882 goto fail_fw_integrity;
4874 } 4883 }
@@ -4879,9 +4888,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4879 if (dlen > risc_size) 4888 if (dlen > risc_size)
4880 dlen = risc_size; 4889 dlen = risc_size;
4881 4890
4882 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4891 ql_dbg(ql_dbg_init, vha, 0x0097,
4883 "addr %x, number of dwords 0x%x.\n", vha->host_no, 4892 "Loading risc segment@ risc addr %x "
4884 risc_addr, dlen)); 4893 "number of dwords 0x%x.\n", risc_addr, dlen);
4885 4894
4886 for (i = 0; i < dlen; i++) 4895 for (i = 0; i < dlen; i++)
4887 dcode[i] = swab32(fwcode[i]); 4896 dcode[i] = swab32(fwcode[i]);
@@ -4889,12 +4898,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4889 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4898 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4890 dlen); 4899 dlen);
4891 if (rval) { 4900 if (rval) {
4892 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4901 ql_log(ql_log_fatal, vha, 0x0098,
4893 "segment %d of firmware\n", vha->host_no, 4902 "Failed to load segment %d of firmware.\n",
4894 fragment)); 4903 fragment);
4895 qla_printk(KERN_WARNING, ha,
4896 "[ERROR] Failed to load segment %d of "
4897 "firmware\n", fragment);
4898 break; 4904 break;
4899 } 4905 }
4900 4906
@@ -4958,14 +4964,13 @@ try_blob_fw:
4958 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) 4964 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4959 return rval; 4965 return rval;
4960 4966
4961 qla_printk(KERN_ERR, ha, 4967 ql_log(ql_log_info, vha, 0x0099,
4962 "FW: Attempting to fallback to golden firmware...\n"); 4968 "Attempting to fallback to golden firmware.\n");
4963 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); 4969 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4964 if (rval != QLA_SUCCESS) 4970 if (rval != QLA_SUCCESS)
4965 return rval; 4971 return rval;
4966 4972
4967 qla_printk(KERN_ERR, ha, 4973 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
4968 "FW: Please update operational firmware...\n");
4969 ha->flags.running_gold_fw = 1; 4974 ha->flags.running_gold_fw = 1;
4970 4975
4971 return rval; 4976 return rval;
@@ -4992,8 +4997,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4992 continue; 4997 continue;
4993 if (qla2x00_setup_chip(vha) != QLA_SUCCESS) 4998 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4994 continue; 4999 continue;
4995 qla_printk(KERN_INFO, ha, 5000 ql_log(ql_log_info, vha, 0x8015,
4996 "Attempting retry of stop-firmware command...\n"); 5001 "Attempting retry of stop-firmware command.\n");
4997 ret = qla2x00_stop_firmware(vha); 5002 ret = qla2x00_stop_firmware(vha);
4998 } 5003 }
4999} 5004}
@@ -5028,10 +5033,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
5028 /* Login to SNS first */ 5033 /* Login to SNS first */
5029 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); 5034 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
5030 if (mb[0] != MBS_COMMAND_COMPLETE) { 5035 if (mb[0] != MBS_COMMAND_COMPLETE) {
5031 DEBUG15(qla_printk(KERN_INFO, ha, 5036 ql_dbg(ql_dbg_init, vha, 0x0103,
5032 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 5037 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
5033 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS, 5038 "mb[6]=%x mb[7]=%x.\n",
5034 mb[0], mb[1], mb[2], mb[6], mb[7])); 5039 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
5035 return (QLA_FUNCTION_FAILED); 5040 return (QLA_FUNCTION_FAILED);
5036 } 5041 }
5037 5042
@@ -5151,19 +5156,23 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5151 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 5156 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
5152 chksum += le32_to_cpu(*dptr++); 5157 chksum += le32_to_cpu(*dptr++);
5153 5158
5154 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 5159 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
5155 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 5160 "Contents of NVRAM:\n");
5161 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
5162 (uint8_t *)nv, ha->nvram_size);
5156 5163
5157 /* Bad NVRAM data, set defaults parameters. */ 5164 /* Bad NVRAM data, set defaults parameters. */
5158 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 5165 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5159 || nv->id[3] != ' ' || 5166 || nv->id[3] != ' ' ||
5160 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 5167 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5161 /* Reset NVRAM data. */ 5168 /* Reset NVRAM data. */
5162 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 5169 ql_log(ql_log_info, vha, 0x0073,
5163 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 5170 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
5171 "version=0x%x.\n", chksum, nv->id[0],
5164 le16_to_cpu(nv->nvram_version)); 5172 le16_to_cpu(nv->nvram_version));
5165 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 5173 ql_log(ql_log_info, vha, 0x0074,
5166 "invalid -- WWPN) defaults.\n"); 5174 "Falling back to functioning (yet invalid -- WWPN) "
5175 "defaults.\n");
5167 5176
5168 /* 5177 /*
5169 * Set default initialization control block. 5178 * Set default initialization control block.
@@ -5355,12 +5364,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5355 if (ha->zio_mode != QLA_ZIO_DISABLED) { 5364 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5356 ha->zio_mode = QLA_ZIO_MODE_6; 5365 ha->zio_mode = QLA_ZIO_MODE_6;
5357 5366
5358 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 5367 ql_log(ql_log_info, vha, 0x0075,
5359 "(%d us).\n", vha->host_no, ha->zio_mode,
5360 ha->zio_timer * 100));
5361 qla_printk(KERN_INFO, ha,
5362 "ZIO mode %d enabled; timer delay (%d us).\n", 5368 "ZIO mode %d enabled; timer delay (%d us).\n",
5363 ha->zio_mode, ha->zio_timer * 100); 5369 ha->zio_mode,
5370 ha->zio_timer * 100);
5364 5371
5365 icb->firmware_options_2 |= cpu_to_le32( 5372 icb->firmware_options_2 |= cpu_to_le32(
5366 (uint32_t)ha->zio_mode); 5373 (uint32_t)ha->zio_mode);
@@ -5369,8 +5376,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5369 } 5376 }
5370 5377
5371 if (rval) { 5378 if (rval) {
5372 DEBUG2_3(printk(KERN_WARNING 5379 ql_log(ql_log_warn, vha, 0x0076,
5373 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 5380 "NVRAM configuration failed.\n");
5374 } 5381 }
5375 return (rval); 5382 return (rval);
5376} 5383}
@@ -5393,9 +5400,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5393 5400
5394 status = qla2x00_fw_ready(vha); 5401 status = qla2x00_fw_ready(vha);
5395 if (!status) { 5402 if (!status) {
5396 qla_printk(KERN_INFO, ha, 5403 ql_log(ql_log_info, vha, 0x803c,
5397 "%s(): Start configure loop, " 5404 "Start configure loop, status =%d.\n", status);
5398 "status = %d\n", __func__, status);
5399 5405
5400 /* Issue a marker after FW becomes ready. */ 5406 /* Issue a marker after FW becomes ready. */
5401 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 5407 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -5417,9 +5423,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5417 if ((vha->device_flags & DFLG_NO_CABLE)) 5423 if ((vha->device_flags & DFLG_NO_CABLE))
5418 status = 0; 5424 status = 0;
5419 5425
5420 qla_printk(KERN_INFO, ha, 5426 ql_log(ql_log_info, vha, 0x803d,
5421 "%s(): Configure loop done, status = 0x%x\n", 5427 "Configure loop done, status = 0x%x.\n", status);
5422 __func__, status);
5423 } 5428 }
5424 5429
5425 if (!status) { 5430 if (!status) {
@@ -5455,9 +5460,9 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5455 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 5460 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5456 &ha->fce_bufs); 5461 &ha->fce_bufs);
5457 if (rval) { 5462 if (rval) {
5458 qla_printk(KERN_WARNING, ha, 5463 ql_log(ql_log_warn, vha, 0x803e,
5459 "Unable to reinitialize FCE " 5464 "Unable to reinitialize FCE (%d).\n",
5460 "(%d).\n", rval); 5465 rval);
5461 ha->flags.fce_enabled = 0; 5466 ha->flags.fce_enabled = 0;
5462 } 5467 }
5463 } 5468 }
@@ -5467,17 +5472,16 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5467 rval = qla2x00_enable_eft_trace(vha, 5472 rval = qla2x00_enable_eft_trace(vha,
5468 ha->eft_dma, EFT_NUM_BUFFERS); 5473 ha->eft_dma, EFT_NUM_BUFFERS);
5469 if (rval) { 5474 if (rval) {
5470 qla_printk(KERN_WARNING, ha, 5475 ql_log(ql_log_warn, vha, 0x803f,
5471 "Unable to reinitialize EFT " 5476 "Unable to reinitialize EFT (%d).\n",
5472 "(%d).\n", rval); 5477 rval);
5473 } 5478 }
5474 } 5479 }
5475 } 5480 }
5476 5481
5477 if (!status) { 5482 if (!status) {
5478 DEBUG(printk(KERN_INFO 5483 ql_dbg(ql_dbg_taskm, vha, 0x8040,
5479 "qla82xx_restart_isp(%ld): succeeded.\n", 5484 "qla82xx_restart_isp succeeded.\n");
5480 vha->host_no));
5481 5485
5482 spin_lock_irqsave(&ha->vport_slock, flags); 5486 spin_lock_irqsave(&ha->vport_slock, flags);
5483 list_for_each_entry(vp, &ha->vp_list, list) { 5487 list_for_each_entry(vp, &ha->vp_list, list) {
@@ -5494,8 +5498,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5494 spin_unlock_irqrestore(&ha->vport_slock, flags); 5498 spin_unlock_irqrestore(&ha->vport_slock, flags);
5495 5499
5496 } else { 5500 } else {
5497 qla_printk(KERN_INFO, ha, 5501 ql_log(ql_log_warn, vha, 0x8041,
5498 "qla82xx_restart_isp: **** FAILED ****\n"); 5502 "qla82xx_restart_isp **** FAILED ****.\n");
5499 } 5503 }
5500 5504
5501 return status; 5505 return status;
@@ -5645,9 +5649,8 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5645 if (ret == QLA_SUCCESS) 5649 if (ret == QLA_SUCCESS)
5646 fcport->fcp_prio = priority; 5650 fcport->fcp_prio = priority;
5647 else 5651 else
5648 DEBUG2(printk(KERN_WARNING 5652 ql_dbg(ql_dbg_user, vha, 0x704f,
5649 "scsi(%ld): Unable to activate fcp priority, " 5653 "Unable to activate fcp priority, ret=0x%x.\n", ret);
5650 " ret=0x%x\n", vha->host_no, ret));
5651 5654
5652 return ret; 5655 return ret;
5653} 5656}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4c8167e11f69..d2e904bc21c0 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -94,11 +94,11 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
94 94
95 /* Don't print state transitions during initial allocation of fcport */ 95 /* Don't print state transitions during initial allocation of fcport */
96 if (old_state && old_state != state) { 96 if (old_state && old_state != state) {
97 DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw, 97 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
98 "scsi(%ld): FCPort state transitioned from %s to %s - " 98 "FCPort state transitioned from %s to %s - "
99 "portid=%02x%02x%02x.\n", fcport->vha->host_no, 99 "portid=%02x%02x%02x.\n",
100 port_state_str[old_state], port_state_str[state], 100 port_state_str[old_state], port_state_str[state],
101 fcport->d_id.b.domain, fcport->d_id.b.area, 101 fcport->d_id.b.domain, fcport->d_id.b.area,
102 fcport->d_id.b.al_pa)); 102 fcport->d_id.b.al_pa);
103 } 103 }
104} 104}
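
The hunks above replace the old DEBUG*/qla_printk() wrappers with ql_dbg()/ql_log() calls that carry a per-message id and are gated by a category mask (see the ql2xextended_error_logging check around the FCAL position-map dump earlier in this patch). Below is a minimal user-space sketch of that gating pattern; the mask bit, severity value, helper names and bodies are stand-ins for illustration, not the driver's actual implementation.

#include <stdarg.h>
#include <stdio.h>

/* Stand-in category bit and severity threshold -- illustrative values only. */
#define QL_DBG_DISC 0x00100000
#define QL_LOG_WARN 1

/* Assumed stand-in for the ql2xextended_error_logging module parameter. */
static unsigned int extended_error_logging = QL_DBG_DISC;
static int log_level = QL_LOG_WARN;

/* Debug message: printed only when its category bit is enabled in the mask. */
static void dbg(unsigned int category, unsigned int id, const char *fmt, ...)
{
	va_list ap;

	if (!(extended_error_logging & category))
		return;
	printf("qla2xxx [%04x]: ", id);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/* Log message: printed whenever its severity is at or above the threshold. */
static void log_msg(int level, unsigned int id, const char *fmt, ...)
{
	va_list ap;

	if (level > log_level)
		return;
	printf("qla2xxx [%04x]: ", id);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	dbg(QL_DBG_DISC, 0x2017, "Entries in ID list (%d).\n", 4);
	log_msg(QL_LOG_WARN, 0x2018, "Memory allocation failed for fcport.\n");
	return 0;
}

Running the sketch prints only messages whose category bit is enabled or whose severity clears the threshold, which mirrors how the converted ql_dbg/ql_log calls in the hunks above appear to be selected at run time.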
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 7bac3cd109d6..49d6906af886 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -150,7 +150,8 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
150 150
151 /* We only support T10 DIF right now */ 151 /* We only support T10 DIF right now */
152 if (guard != SHOST_DIX_GUARD_CRC) { 152 if (guard != SHOST_DIX_GUARD_CRC) {
153 DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard)); 153 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
154 "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
154 return 0; 155 return 0;
155 } 156 }
156 157
@@ -343,9 +344,10 @@ qla2x00_start_scsi(srb_t *sp)
343 344
344 /* Send marker if required */ 345 /* Send marker if required */
345 if (vha->marker_needed != 0) { 346 if (vha->marker_needed != 0) {
346 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) 347 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
347 != QLA_SUCCESS) 348 QLA_SUCCESS) {
348 return (QLA_FUNCTION_FAILED); 349 return (QLA_FUNCTION_FAILED);
350 }
349 vha->marker_needed = 0; 351 vha->marker_needed = 0;
350 } 352 }
351 353
@@ -490,8 +492,8 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
490 mrk24 = NULL; 492 mrk24 = NULL;
491 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0); 493 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
492 if (mrk == NULL) { 494 if (mrk == NULL) {
493 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 495 ql_log(ql_log_warn, base_vha, 0x3026,
494 __func__, base_vha->host_no)); 496 "Failed to allocate Marker IOCB.\n");
495 497
496 return (QLA_FUNCTION_FAILED); 498 return (QLA_FUNCTION_FAILED);
497 } 499 }
@@ -547,9 +549,10 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
547 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 549 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
548 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; 550 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
549 551
550 DEBUG5(printk("%s(): IOCB data:\n", __func__)); 552 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
551 DEBUG5(qla2x00_dump_buffer( 553 "IOCB data:\n");
552 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE)); 554 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
555 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
553 556
554 /* Adjust ring index. */ 557 /* Adjust ring index. */
555 req->ring_index++; 558 req->ring_index++;
@@ -604,7 +607,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
604 * Returns the number of IOCB entries needed to store @dsds. 607 * Returns the number of IOCB entries needed to store @dsds.
605 */ 608 */
606inline uint16_t 609inline uint16_t
607qla24xx_calc_iocbs(uint16_t dsds) 610qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
608{ 611{
609 uint16_t iocbs; 612 uint16_t iocbs;
610 613
@@ -614,8 +617,6 @@ qla24xx_calc_iocbs(uint16_t dsds)
614 if ((dsds - 1) % 5) 617 if ((dsds - 1) % 5)
615 iocbs++; 618 iocbs++;
616 } 619 }
617 DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
618 __func__, iocbs));
619 return iocbs; 620 return iocbs;
620} 621}
621 622
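
The qla24xx_calc_iocbs() hunks above only add a vha parameter and drop the old debug print; the remainder check they keep ("if ((dsds - 1) % 5) iocbs++;") is consistent with packing one data-segment descriptor in the command IOCB and up to five in each continuation IOCB. The standalone sketch below restates that arithmetic for illustration; the one-plus-five packing is an assumption read from the visible lines, not a quote of the driver function.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative restatement: one data-segment descriptor fits in the command
 * IOCB and each continuation IOCB holds up to five more, so the total is
 * 1 + ceil((dsds - 1) / 5) whenever more than one segment is needed.
 */
static uint16_t calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs = 1;

	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;	/* full continuation IOCBs */
		if ((dsds - 1) % 5)
			iocbs++;		/* partial continuation IOCB */
	}
	return iocbs;
}

int main(void)
{
	uint16_t dsds;

	for (dsds = 1; dsds <= 11; dsds++)
		printf("dsds=%2u -> iocbs=%u\n", dsds, calc_iocbs(dsds));
	return 0;
}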
@@ -712,6 +713,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
712 unsigned int protcnt) 713 unsigned int protcnt)
713{ 714{
714 struct sd_dif_tuple *spt; 715 struct sd_dif_tuple *spt;
716 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
715 unsigned char op = scsi_get_prot_op(cmd); 717 unsigned char op = scsi_get_prot_op(cmd);
716 718
717 switch (scsi_get_prot_type(cmd)) { 719 switch (scsi_get_prot_type(cmd)) {
@@ -768,9 +770,9 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
768 op == SCSI_PROT_WRITE_PASS)) { 770 op == SCSI_PROT_WRITE_PASS)) {
769 spt = page_address(sg_page(scsi_prot_sglist(cmd))) + 771 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
770 scsi_prot_sglist(cmd)[0].offset; 772 scsi_prot_sglist(cmd)[0].offset;
771 DEBUG18(printk(KERN_DEBUG 773 ql_dbg(ql_dbg_io, vha, 0x3008,
772 "%s(): LBA from user %p, lba = 0x%x\n", 774 "LBA from user %p, lba = 0x%x for cmd=%p.\n",
773 __func__, spt, (int)spt->ref_tag)); 775 spt, (int)spt->ref_tag, cmd);
774 pkt->ref_tag = swab32(spt->ref_tag); 776 pkt->ref_tag = swab32(spt->ref_tag);
775 pkt->app_tag_mask[0] = 0x0; 777 pkt->app_tag_mask[0] = 0x0;
776 pkt->app_tag_mask[1] = 0x0; 778 pkt->app_tag_mask[1] = 0x0;
@@ -789,11 +791,11 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
789 break; 791 break;
790 } 792 }
791 793
792 DEBUG18(printk(KERN_DEBUG 794 ql_dbg(ql_dbg_io, vha, 0x3009,
793 "%s(): Setting protection Tags: (BIG) ref tag = 0x%x," 795 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
794 " app tag = 0x%x, prot SG count %d , cmd lba 0x%x," 796 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
795 " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt, 797 pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
796 (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd))); 798 scsi_get_prot_type(cmd), cmd);
797} 799}
798 800
799 801
@@ -809,6 +811,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
809 uint32_t *cur_dsd = dsd; 811 uint32_t *cur_dsd = dsd;
810 int i; 812 int i;
811 uint16_t used_dsds = tot_dsds; 813 uint16_t used_dsds = tot_dsds;
814 scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
812 815
813 uint8_t *cp; 816 uint8_t *cp;
814 817
@@ -853,9 +856,10 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
853 cur_dsd = (uint32_t *)next_dsd; 856 cur_dsd = (uint32_t *)next_dsd;
854 } 857 }
855 sle_dma = sg_dma_address(sg); 858 sle_dma = sg_dma_address(sg);
856 DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x," 859 ql_dbg(ql_dbg_io, vha, 0x300a,
857 " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma), 860 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
858 MSD(sle_dma), sg_dma_len(sg))); 861 cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
862 sp->cmd);
859 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 863 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
860 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 864 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
861 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 865 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
@@ -863,8 +867,8 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
863 867
864 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 868 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
865 cp = page_address(sg_page(sg)) + sg->offset; 869 cp = page_address(sg_page(sg)) + sg->offset;
866 DEBUG18(printk("%s(): User Data buffer= %p:\n", 870 ql_dbg(ql_dbg_io, vha, 0x300b,
867 __func__ , cp)); 871 "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
868 } 872 }
869 } 873 }
870 /* Null termination */ 874 /* Null termination */
@@ -888,7 +892,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
888 struct scsi_cmnd *cmd; 892 struct scsi_cmnd *cmd;
889 uint32_t *cur_dsd = dsd; 893 uint32_t *cur_dsd = dsd;
890 uint16_t used_dsds = tot_dsds; 894 uint16_t used_dsds = tot_dsds;
891 895 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
892 uint8_t *cp; 896 uint8_t *cp;
893 897
894 898
@@ -935,10 +939,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
935 } 939 }
936 sle_dma = sg_dma_address(sg); 940 sle_dma = sg_dma_address(sg);
937 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 941 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
938 DEBUG18(printk(KERN_DEBUG 942 ql_dbg(ql_dbg_io, vha, 0x3027,
939 "%s(): %p, sg entry %d - addr =0x%x" 943 "%s(): %p, sg_entry %d - "
940 "0x%x, len =%d\n", __func__ , cur_dsd, i, 944 "addr=0x%x0x%x, len=%d.\n",
941 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg))); 945 __func__, cur_dsd, i,
946 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
942 } 947 }
943 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 948 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
944 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 949 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -946,8 +951,9 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
946 951
947 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 952 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
948 cp = page_address(sg_page(sg)) + sg->offset; 953 cp = page_address(sg_page(sg)) + sg->offset;
949 DEBUG18(printk("%s(): Protection Data buffer = %p:\n", 954 ql_dbg(ql_dbg_io, vha, 0x3028,
950 __func__ , cp)); 955 "%s(): Protection Data buffer = %p.\n", __func__,
956 cp);
951 } 957 }
952 avail_dsds--; 958 avail_dsds--;
953 } 959 }
@@ -996,22 +1002,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
996 *((uint32_t *)(&cmd_pkt->entry_type)) = 1002 *((uint32_t *)(&cmd_pkt->entry_type)) =
997 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2); 1003 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
998 1004
1005 vha = sp->fcport->vha;
1006 ha = vha->hw;
1007
999 /* No data transfer */ 1008 /* No data transfer */
1000 data_bytes = scsi_bufflen(cmd); 1009 data_bytes = scsi_bufflen(cmd);
1001 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1010 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1002 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1003 __func__, data_bytes));
1004 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1011 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1005 return QLA_SUCCESS; 1012 return QLA_SUCCESS;
1006 } 1013 }
1007 1014
1008 vha = sp->fcport->vha;
1009 ha = vha->hw;
1010
1011 DEBUG18(printk(KERN_DEBUG
1012 "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
1013 vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
1014
1015 cmd_pkt->vp_index = sp->fcport->vp_idx; 1015 cmd_pkt->vp_index = sp->fcport->vp_idx;
1016 1016
1017 /* Set transfer direction */ 1017 /* Set transfer direction */
@@ -1056,8 +1056,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1056 1056
1057 /* Determine SCSI command length -- align to 4 byte boundary */ 1057 /* Determine SCSI command length -- align to 4 byte boundary */
1058 if (cmd->cmd_len > 16) { 1058 if (cmd->cmd_len > 16) {
1059 DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
1060 __func__));
1061 additional_fcpcdb_len = cmd->cmd_len - 16; 1059 additional_fcpcdb_len = cmd->cmd_len - 16;
1062 if ((cmd->cmd_len % 4) != 0) { 1060 if ((cmd->cmd_len % 4) != 0) {
1063 /* SCSI cmd > 16 bytes must be multiple of 4 */ 1061 /* SCSI cmd > 16 bytes must be multiple of 4 */
@@ -1108,11 +1106,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1108 1106
1109 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1107 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1110 1108
1111 DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
1112 "entries %d, data bytes %d, Protection entries %d\n",
1113 __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
1114 data_bytes, tot_prot_dsds));
1115
1116 /* Compute dif len and adjust data len to incude protection */ 1109 /* Compute dif len and adjust data len to incude protection */
1117 total_bytes = data_bytes; 1110 total_bytes = data_bytes;
1118 dif_bytes = 0; 1111 dif_bytes = 0;
@@ -1150,14 +1143,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1150 additional_fcpcdb_len); 1143 additional_fcpcdb_len);
1151 *fcp_dl = htonl(total_bytes); 1144 *fcp_dl = htonl(total_bytes);
1152 1145
1153 DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
1154 " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
1155 vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
1156 crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
1157
1158 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1146 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1159 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1160 __func__, data_bytes));
1161 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1147 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1162 return QLA_SUCCESS; 1148 return QLA_SUCCESS;
1163 } 1149 }
@@ -1182,8 +1168,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1182 return QLA_SUCCESS; 1168 return QLA_SUCCESS;
1183 1169
1184crc_queuing_error: 1170crc_queuing_error:
1185 DEBUG18(qla_printk(KERN_INFO, ha,
1186 "CMD sent FAILED crc_q error:sp = %p\n", sp));
1187 /* Cleanup will be performed by the caller */ 1171 /* Cleanup will be performed by the caller */
1188 1172
1189 return QLA_FUNCTION_FAILED; 1173 return QLA_FUNCTION_FAILED;
@@ -1225,8 +1209,8 @@ qla24xx_start_scsi(srb_t *sp)
1225 1209
1226 /* Send marker if required */ 1210 /* Send marker if required */
1227 if (vha->marker_needed != 0) { 1211 if (vha->marker_needed != 0) {
1228 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) 1212 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1229 != QLA_SUCCESS) 1213 QLA_SUCCESS)
1230 return QLA_FUNCTION_FAILED; 1214 return QLA_FUNCTION_FAILED;
1231 vha->marker_needed = 0; 1215 vha->marker_needed = 0;
1232 } 1216 }
@@ -1243,8 +1227,9 @@ qla24xx_start_scsi(srb_t *sp)
1243 if (!req->outstanding_cmds[handle]) 1227 if (!req->outstanding_cmds[handle])
1244 break; 1228 break;
1245 } 1229 }
1246 if (index == MAX_OUTSTANDING_COMMANDS) 1230 if (index == MAX_OUTSTANDING_COMMANDS) {
1247 goto queuing_error; 1231 goto queuing_error;
1232 }
1248 1233
1249 /* Map the sg table so we have an accurate count of sg entries needed */ 1234 /* Map the sg table so we have an accurate count of sg entries needed */
1250 if (scsi_sg_count(cmd)) { 1235 if (scsi_sg_count(cmd)) {
@@ -1256,8 +1241,7 @@ qla24xx_start_scsi(srb_t *sp)
1256 nseg = 0; 1241 nseg = 0;
1257 1242
1258 tot_dsds = nseg; 1243 tot_dsds = nseg;
1259 1244 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1260 req_cnt = qla24xx_calc_iocbs(tot_dsds);
1261 if (req->cnt < (req_cnt + 2)) { 1245 if (req->cnt < (req_cnt + 2)) {
1262 cnt = RD_REG_DWORD_RELAXED(req->req_q_out); 1246 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1263 1247
@@ -1322,7 +1306,6 @@ qla24xx_start_scsi(srb_t *sp)
1322 /* Specify response queue number where completion should happen */ 1306 /* Specify response queue number where completion should happen */
1323 cmd_pkt->entry_status = (uint8_t) rsp->id; 1307 cmd_pkt->entry_status = (uint8_t) rsp->id;
1324 wmb(); 1308 wmb();
1325
1326 /* Adjust ring index. */ 1309 /* Adjust ring index. */
1327 req->ring_index++; 1310 req->ring_index++;
1328 if (req->ring_index == req->length) { 1311 if (req->ring_index == req->length) {
@@ -1534,9 +1517,6 @@ queuing_error:
1534 /* Cleanup will be performed by the caller (queuecommand) */ 1517 /* Cleanup will be performed by the caller (queuecommand) */
1535 1518
1536 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1519 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1537
1538 DEBUG18(qla_printk(KERN_INFO, ha,
1539 "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
1540 return QLA_FUNCTION_FAILED; 1520 return QLA_FUNCTION_FAILED;
1541} 1521}
1542 1522
@@ -1581,8 +1561,11 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1581 if (!req->outstanding_cmds[handle]) 1561 if (!req->outstanding_cmds[handle])
1582 break; 1562 break;
1583 } 1563 }
1584 if (index == MAX_OUTSTANDING_COMMANDS) 1564 if (index == MAX_OUTSTANDING_COMMANDS) {
1565 ql_log(ql_log_warn, vha, 0x700b,
1566 "No room on oustanding cmd array.\n");
1585 goto queuing_error; 1567 goto queuing_error;
1568 }
1586 1569
1587 /* Prep command array. */ 1570 /* Prep command array. */
1588 req->current_outstanding_cmd = handle; 1571 req->current_outstanding_cmd = handle;
@@ -1999,8 +1982,11 @@ qla2x00_start_sp(srb_t *sp)
1999 rval = QLA_FUNCTION_FAILED; 1982 rval = QLA_FUNCTION_FAILED;
2000 spin_lock_irqsave(&ha->hardware_lock, flags); 1983 spin_lock_irqsave(&ha->hardware_lock, flags);
2001 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); 1984 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2002 if (!pkt) 1985 if (!pkt) {
1986 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
1987 "qla2x00_alloc_iocbs failed.\n");
2003 goto done; 1988 goto done;
1989 }
2004 1990
2005 rval = QLA_SUCCESS; 1991 rval = QLA_SUCCESS;
2006 switch (ctx->type) { 1992 switch (ctx->type) {
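The qla_iocb.c hunks above all apply the same conversion: compile-time DEBUG*/qla_printk() macros are replaced by ql_dbg()/ql_log() calls that take a debug-category mask, the scsi_qla_host_t pointer, a unique hex message id (0x3xxx on the I/O path here) and a printk-style format. The stub below is a minimal standalone sketch of that calling convention only; the mask name, message prefix and filter check are assumptions for illustration, not the driver's implementation (the real ql_dbg()/ql_log() presumably come from the qla_dbg.c changes in this series).

/* Standalone mock of the new logging convention -- illustration only.
 * The category bit, mask and prefix below are assumed values, not the
 * driver's definitions. */
#include <stdarg.h>
#include <stdio.h>

#define ql_dbg_io 0x1u                          /* mock category bit */

static unsigned int ql_dbg_mask = 0x1u;         /* stands in for the module-level mask */

struct scsi_qla_host { long host_no; };         /* reduced stand-in for vha */

static void ql_dbg(unsigned int level, struct scsi_qla_host *vha,
                   int id, const char *fmt, ...)
{
        va_list ap;

        if (!(level & ql_dbg_mask))
                return;                         /* category filtered out */
        printf("qla2xxx [host%ld]-%04x: ", vha->host_no, id);
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
}

int main(void)
{
        struct scsi_qla_host vha = { .host_no = 0 };

        /* Same shape as the converted call sites above. */
        ql_dbg(ql_dbg_io, &vha, 0x300a,
            "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
            0, 0u, 0u, 512, (void *)0);
        return 0;
}

The per-call ids (0x3009, 0x300a, ... above) make it possible to locate the exact call site from a single log line, which appears to be the motivation for carrying an id in every call.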
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ae8e298746ba..b16b7725dee0 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -45,7 +45,7 @@ qla2100_intr_handler(int irq, void *dev_id)
45 rsp = (struct rsp_que *) dev_id; 45 rsp = (struct rsp_que *) dev_id;
46 if (!rsp) { 46 if (!rsp) {
47 printk(KERN_INFO 47 printk(KERN_INFO
48 "%s(): NULL response queue pointer\n", __func__); 48 "%s(): NULL response queue pointer.\n", __func__);
49 return (IRQ_NONE); 49 return (IRQ_NONE);
50 } 50 }
51 51
@@ -91,9 +91,9 @@ qla2100_intr_handler(int irq, void *dev_id)
91 qla2x00_async_event(vha, rsp, mb); 91 qla2x00_async_event(vha, rsp, mb);
92 } else { 92 } else {
93 /*EMPTY*/ 93 /*EMPTY*/
94 DEBUG2(printk("scsi(%ld): Unrecognized " 94 ql_dbg(ql_dbg_async, vha, 0x5025,
95 "interrupt type (%d).\n", 95 "Unrecognized interrupt type (%d).\n",
96 vha->host_no, mb[0])); 96 mb[0]);
97 } 97 }
98 /* Release mailbox registers. */ 98 /* Release mailbox registers. */
99 WRT_REG_WORD(&reg->semaphore, 0); 99 WRT_REG_WORD(&reg->semaphore, 0);
@@ -142,7 +142,7 @@ qla2300_intr_handler(int irq, void *dev_id)
142 rsp = (struct rsp_que *) dev_id; 142 rsp = (struct rsp_que *) dev_id;
143 if (!rsp) { 143 if (!rsp) {
144 printk(KERN_INFO 144 printk(KERN_INFO
145 "%s(): NULL response queue pointer\n", __func__); 145 "%s(): NULL response queue pointer.\n", __func__);
146 return (IRQ_NONE); 146 return (IRQ_NONE);
147 } 147 }
148 148
@@ -160,11 +160,13 @@ qla2300_intr_handler(int irq, void *dev_id)
160 160
161 hccr = RD_REG_WORD(&reg->hccr); 161 hccr = RD_REG_WORD(&reg->hccr);
162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
163 qla_printk(KERN_INFO, ha, "Parity error -- " 163 ql_log(ql_log_warn, vha, 0x5026,
164 "HCCR=%x, Dumping firmware!\n", hccr); 164 "Parity error -- HCCR=%x, Dumping "
165 "firmware.\n", hccr);
165 else 166 else
166 qla_printk(KERN_INFO, ha, "RISC paused -- " 167 ql_log(ql_log_warn, vha, 0x5027,
167 "HCCR=%x, Dumping firmware!\n", hccr); 168 "RISC paused -- HCCR=%x, Dumping "
169 "firmware.\n", hccr);
168 170
169 /* 171 /*
170 * Issue a "HARD" reset in order for the RISC 172 * Issue a "HARD" reset in order for the RISC
@@ -213,9 +215,8 @@ qla2300_intr_handler(int irq, void *dev_id)
213 qla2x00_async_event(vha, rsp, mb); 215 qla2x00_async_event(vha, rsp, mb);
214 break; 216 break;
215 default: 217 default:
216 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 218 ql_dbg(ql_dbg_async, vha, 0x5028,
217 "(%d).\n", 219 "Unrecognized interrupt type (%d).\n", stat & 0xff);
218 vha->host_no, stat & 0xff));
219 break; 220 break;
220 } 221 }
221 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 222 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -262,11 +263,11 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
262 } 263 }
263 264
264 if (ha->mcp) { 265 if (ha->mcp) {
265 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 266 ql_dbg(ql_dbg_async, vha, 0x5000,
266 __func__, vha->host_no, ha->mcp->mb[0])); 267 "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
267 } else { 268 } else {
268 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 269 ql_dbg(ql_dbg_async, vha, 0x5001,
269 __func__, vha->host_no)); 270 "MBX pointer ERROR.\n");
270 } 271 }
271} 272}
272 273
@@ -285,22 +286,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
285 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 286 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
286 mb[cnt] = RD_REG_WORD(wptr); 287 mb[cnt] = RD_REG_WORD(wptr);
287 288
288 DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " 289 ql_dbg(ql_dbg_async, vha, 0x5021,
289 "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no, 290 "Inter-Driver Commucation %s -- "
290 event[aen & 0xff], 291 "%04x %04x %04x %04x %04x %04x %04x.\n",
291 mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6])); 292 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
293 mb[4], mb[5], mb[6]);
292 294
293 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 295 /* Acknowledgement needed? [Notify && non-zero timeout]. */
294 timeout = (descr >> 8) & 0xf; 296 timeout = (descr >> 8) & 0xf;
295 if (aen != MBA_IDC_NOTIFY || !timeout) 297 if (aen != MBA_IDC_NOTIFY || !timeout)
296 return; 298 return;
297 299
298 DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " 300 ql_dbg(ql_dbg_async, vha, 0x5022,
299 "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout)); 301 "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
302 vha->host_no, event[aen & 0xff], timeout);
300 303
301 rval = qla2x00_post_idc_ack_work(vha, mb); 304 rval = qla2x00_post_idc_ack_work(vha, mb);
302 if (rval != QLA_SUCCESS) 305 if (rval != QLA_SUCCESS)
303 qla_printk(KERN_WARNING, vha->hw, 306 ql_log(ql_log_warn, vha, 0x5023,
304 "IDC failed to post ACK.\n"); 307 "IDC failed to post ACK.\n");
305} 308}
306 309
@@ -393,15 +396,15 @@ skip_rio:
393 break; 396 break;
394 397
395 case MBA_RESET: /* Reset */ 398 case MBA_RESET: /* Reset */
396 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", 399 ql_dbg(ql_dbg_async, vha, 0x5002,
397 vha->host_no)); 400 "Asynchronous RESET.\n");
398 401
399 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 402 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
400 break; 403 break;
401 404
402 case MBA_SYSTEM_ERR: /* System Error */ 405 case MBA_SYSTEM_ERR: /* System Error */
403 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0; 406 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
404 qla_printk(KERN_INFO, ha, 407 ql_log(ql_log_warn, vha, 0x5003,
405 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 408 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
406 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 409 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
407 410
@@ -409,7 +412,7 @@ skip_rio:
409 412
410 if (IS_FWI2_CAPABLE(ha)) { 413 if (IS_FWI2_CAPABLE(ha)) {
411 if (mb[1] == 0 && mb[2] == 0) { 414 if (mb[1] == 0 && mb[2] == 0) {
412 qla_printk(KERN_ERR, ha, 415 ql_log(ql_log_fatal, vha, 0x5004,
413 "Unrecoverable Hardware Error: adapter " 416 "Unrecoverable Hardware Error: adapter "
414 "marked OFFLINE!\n"); 417 "marked OFFLINE!\n");
415 vha->flags.online = 0; 418 vha->flags.online = 0;
@@ -422,7 +425,7 @@ skip_rio:
422 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 425 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
423 } 426 }
424 } else if (mb[1] == 0) { 427 } else if (mb[1] == 0) {
425 qla_printk(KERN_INFO, ha, 428 ql_log(ql_log_fatal, vha, 0x5005,
426 "Unrecoverable Hardware Error: adapter marked " 429 "Unrecoverable Hardware Error: adapter marked "
427 "OFFLINE!\n"); 430 "OFFLINE!\n");
428 vha->flags.online = 0; 431 vha->flags.online = 0;
@@ -431,31 +434,27 @@ skip_rio:
431 break; 434 break;
432 435
433 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 436 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
434 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n", 437 ql_log(ql_log_warn, vha, 0x5006,
435 vha->host_no, mb[1])); 438 "ISP Request Transfer Error (%x).\n", mb[1]);
436 qla_printk(KERN_WARNING, ha,
437 "ISP Request Transfer Error (%x).\n", mb[1]);
438 439
439 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 440 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
440 break; 441 break;
441 442
442 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 443 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
443 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", 444 ql_log(ql_log_warn, vha, 0x5007,
444 vha->host_no)); 445 "ISP Response Transfer Error.\n");
445 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
446 446
447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
448 break; 448 break;
449 449
450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
451 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", 451 ql_dbg(ql_dbg_async, vha, 0x5008,
452 vha->host_no)); 452 "Asynchronous WAKEUP_THRES.\n");
453 break; 453 break;
454 454
455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
456 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no, 456 ql_log(ql_log_info, vha, 0x5009,
457 mb[1])); 457 "LIP occurred (%x).\n", mb[1]);
458 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
459 458
460 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
461 atomic_set(&vha->loop_state, LOOP_DOWN); 460 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -488,10 +487,8 @@ skip_rio:
488 ha->link_data_rate = mb[1]; 487 ha->link_data_rate = mb[1];
489 } 488 }
490 489
491 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", 490 ql_log(ql_log_info, vha, 0x500a,
492 vha->host_no, link_speed)); 491 "LOOP UP detected (%s Gbps).\n", link_speed);
493 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
494 link_speed);
495 492
496 vha->flags.management_server_logged_in = 0; 493 vha->flags.management_server_logged_in = 0;
497 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 494 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -500,12 +497,9 @@ skip_rio:
500 case MBA_LOOP_DOWN: /* Loop Down Event */ 497 case MBA_LOOP_DOWN: /* Loop Down Event */
501 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0; 498 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
502 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; 499 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
503 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 500 ql_log(ql_log_info, vha, 0x500b,
504 "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], 501 "LOOP DOWN detected (%x %x %x %x).\n",
505 mbx)); 502 mb[1], mb[2], mb[3], mbx);
506 qla_printk(KERN_INFO, ha,
507 "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
508 mbx);
509 503
510 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 504 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
511 atomic_set(&vha->loop_state, LOOP_DOWN); 505 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -525,9 +519,7 @@ skip_rio:
525 break; 519 break;
526 520
527 case MBA_LIP_RESET: /* LIP reset occurred */ 521 case MBA_LIP_RESET: /* LIP reset occurred */
528 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 522 ql_log(ql_log_info, vha, 0x500c,
529 vha->host_no, mb[1]));
530 qla_printk(KERN_INFO, ha,
531 "LIP reset occurred (%x).\n", mb[1]); 523 "LIP reset occurred (%x).\n", mb[1]);
532 524
533 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 525 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -554,14 +546,15 @@ skip_rio:
554 break; 546 break;
555 547
556 if (IS_QLA8XXX_TYPE(ha)) { 548 if (IS_QLA8XXX_TYPE(ha)) {
557 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x " 549 ql_dbg(ql_dbg_async, vha, 0x500d,
558 "%04x\n", vha->host_no, mb[1], mb[2], mb[3])); 550 "DCBX Completed -- %04x %04x %04x.\n",
551 mb[1], mb[2], mb[3]);
559 if (ha->notify_dcbx_comp) 552 if (ha->notify_dcbx_comp)
560 complete(&ha->dcbx_comp); 553 complete(&ha->dcbx_comp);
561 554
562 } else 555 } else
563 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE " 556 ql_dbg(ql_dbg_async, vha, 0x500e,
564 "received.\n", vha->host_no)); 557 "Asynchronous P2P MODE received.\n");
565 558
566 /* 559 /*
567 * Until there's a transition from loop down to loop up, treat 560 * Until there's a transition from loop down to loop up, treat
@@ -594,10 +587,7 @@ skip_rio:
594 if (IS_QLA2100(ha)) 587 if (IS_QLA2100(ha))
595 break; 588 break;
596 589
597 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " 590 ql_log(ql_log_info, vha, 0x500f,
598 "received.\n",
599 vha->host_no));
600 qla_printk(KERN_INFO, ha,
601 "Configuration change detected: value=%x.\n", mb[1]); 591 "Configuration change detected: value=%x.\n", mb[1]);
602 592
603 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 593 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -640,11 +630,9 @@ skip_rio:
640 630
641 /* Global event -- port logout or port unavailable. */ 631 /* Global event -- port logout or port unavailable. */
642 if (mb[1] == 0xffff && mb[2] == 0x7) { 632 if (mb[1] == 0xffff && mb[2] == 0x7) {
643 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 633 ql_dbg(ql_dbg_async, vha, 0x5010,
644 vha->host_no)); 634 "Port unavailable %04x %04x %04x.\n",
645 DEBUG(printk(KERN_INFO 635 mb[1], mb[2], mb[3]);
646 "scsi(%ld): Port unavailable %04x %04x %04x.\n",
647 vha->host_no, mb[1], mb[2], mb[3]));
648 636
649 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 637 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
650 atomic_set(&vha->loop_state, LOOP_DOWN); 638 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -674,17 +662,15 @@ skip_rio:
674 atomic_set(&vha->loop_down_timer, 0); 662 atomic_set(&vha->loop_down_timer, 0);
675 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 663 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
676 atomic_read(&vha->loop_state) != LOOP_DEAD) { 664 atomic_read(&vha->loop_state) != LOOP_DEAD) {
677 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " 665 ql_dbg(ql_dbg_async, vha, 0x5011,
678 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1], 666 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
679 mb[2], mb[3])); 667 mb[1], mb[2], mb[3]);
680 break; 668 break;
681 } 669 }
682 670
683 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 671 ql_dbg(ql_dbg_async, vha, 0x5012,
684 vha->host_no)); 672 "Port database changed %04x %04x %04x.\n",
685 DEBUG(printk(KERN_INFO 673 mb[1], mb[2], mb[3]);
686 "scsi(%ld): Port database changed %04x %04x %04x.\n",
687 vha->host_no, mb[1], mb[2], mb[3]));
688 674
689 /* 675 /*
690 * Mark all devices as missing so we will login again. 676 * Mark all devices as missing so we will login again.
@@ -707,20 +693,17 @@ skip_rio:
707 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 693 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
708 break; 694 break;
709 695
710 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", 696 ql_dbg(ql_dbg_async, vha, 0x5013,
711 vha->host_no)); 697 "RSCN database changed -- %04x %04x %04x.\n",
712 DEBUG(printk(KERN_INFO 698 mb[1], mb[2], mb[3]);
713 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
714 vha->host_no, mb[1], mb[2], mb[3]));
715 699
716 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 700 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
717 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 701 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
718 | vha->d_id.b.al_pa; 702 | vha->d_id.b.al_pa;
719 if (rscn_entry == host_pid) { 703 if (rscn_entry == host_pid) {
720 DEBUG(printk(KERN_INFO 704 ql_dbg(ql_dbg_async, vha, 0x5014,
721 "scsi(%ld): Ignoring RSCN update to local host " 705 "Ignoring RSCN update to local host "
722 "port ID (%06x)\n", 706 "port ID (%06x).\n", host_pid);
723 vha->host_no, host_pid));
724 break; 707 break;
725 } 708 }
726 709
@@ -747,8 +730,8 @@ skip_rio:
747 730
748 /* case MBA_RIO_RESPONSE: */ 731 /* case MBA_RIO_RESPONSE: */
749 case MBA_ZIO_RESPONSE: 732 case MBA_ZIO_RESPONSE:
750 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n", 733 ql_dbg(ql_dbg_async, vha, 0x5015,
751 vha->host_no)); 734 "[R|Z]IO update completion.\n");
752 735
753 if (IS_FWI2_CAPABLE(ha)) 736 if (IS_FWI2_CAPABLE(ha))
754 qla24xx_process_response_queue(vha, rsp); 737 qla24xx_process_response_queue(vha, rsp);
@@ -757,61 +740,68 @@ skip_rio:
757 break; 740 break;
758 741
759 case MBA_DISCARD_RND_FRAME: 742 case MBA_DISCARD_RND_FRAME:
760 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 743 ql_dbg(ql_dbg_async, vha, 0x5016,
761 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3])); 744 "Discard RND Frame -- %04x %04x %04x.\n",
745 mb[1], mb[2], mb[3]);
762 break; 746 break;
763 747
764 case MBA_TRACE_NOTIFICATION: 748 case MBA_TRACE_NOTIFICATION:
765 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", 749 ql_dbg(ql_dbg_async, vha, 0x5017,
766 vha->host_no, mb[1], mb[2])); 750 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
767 break; 751 break;
768 752
769 case MBA_ISP84XX_ALERT: 753 case MBA_ISP84XX_ALERT:
770 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " 754 ql_dbg(ql_dbg_async, vha, 0x5018,
771 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 755 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
756 mb[1], mb[2], mb[3]);
772 757
773 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 758 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
774 switch (mb[1]) { 759 switch (mb[1]) {
775 case A84_PANIC_RECOVERY: 760 case A84_PANIC_RECOVERY:
776 qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery " 761 ql_log(ql_log_info, vha, 0x5019,
777 "%04x %04x\n", mb[2], mb[3]); 762 "Alert 84XX: panic recovery %04x %04x.\n",
763 mb[2], mb[3]);
778 break; 764 break;
779 case A84_OP_LOGIN_COMPLETE: 765 case A84_OP_LOGIN_COMPLETE:
780 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 766 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
781 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 767 ql_log(ql_log_info, vha, 0x501a,
782 "firmware version %x\n", ha->cs84xx->op_fw_version)); 768 "Alert 84XX: firmware version %x.\n",
769 ha->cs84xx->op_fw_version);
783 break; 770 break;
784 case A84_DIAG_LOGIN_COMPLETE: 771 case A84_DIAG_LOGIN_COMPLETE:
785 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 772 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
786 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 773 ql_log(ql_log_info, vha, 0x501b,
787 "diagnostic firmware version %x\n", 774 "Alert 84XX: diagnostic firmware version %x.\n",
788 ha->cs84xx->diag_fw_version)); 775 ha->cs84xx->diag_fw_version);
789 break; 776 break;
790 case A84_GOLD_LOGIN_COMPLETE: 777 case A84_GOLD_LOGIN_COMPLETE:
791 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 778 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
792 ha->cs84xx->fw_update = 1; 779 ha->cs84xx->fw_update = 1;
793 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold " 780 ql_log(ql_log_info, vha, 0x501c,
794 "firmware version %x\n", 781 "Alert 84XX: gold firmware version %x.\n",
795 ha->cs84xx->gold_fw_version)); 782 ha->cs84xx->gold_fw_version);
796 break; 783 break;
797 default: 784 default:
798 qla_printk(KERN_ERR, ha, 785 ql_log(ql_log_warn, vha, 0x501d,
799 "Alert 84xx: Invalid Alert %04x %04x %04x\n", 786 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
800 mb[1], mb[2], mb[3]); 787 mb[1], mb[2], mb[3]);
801 } 788 }
802 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 789 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
803 break; 790 break;
804 case MBA_DCBX_START: 791 case MBA_DCBX_START:
805 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n", 792 ql_dbg(ql_dbg_async, vha, 0x501e,
806 vha->host_no, mb[1], mb[2], mb[3])); 793 "DCBX Started -- %04x %04x %04x.\n",
794 mb[1], mb[2], mb[3]);
807 break; 795 break;
808 case MBA_DCBX_PARAM_UPDATE: 796 case MBA_DCBX_PARAM_UPDATE:
809 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- " 797 ql_dbg(ql_dbg_async, vha, 0x501f,
810 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 798 "DCBX Parameters Updated -- %04x %04x %04x.\n",
799 mb[1], mb[2], mb[3]);
811 break; 800 break;
812 case MBA_FCF_CONF_ERR: 801 case MBA_FCF_CONF_ERR:
813 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- " 802 ql_dbg(ql_dbg_async, vha, 0x5020,
814 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 803 "FCF Configuration Error -- %04x %04x %04x.\n",
804 mb[1], mb[2], mb[3]);
815 break; 805 break;
816 case MBA_IDC_COMPLETE: 806 case MBA_IDC_COMPLETE:
817 case MBA_IDC_NOTIFY: 807 case MBA_IDC_NOTIFY:
@@ -838,10 +828,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
838 828
839 /* Validate handle. */ 829 /* Validate handle. */
840 if (index >= MAX_OUTSTANDING_COMMANDS) { 830 if (index >= MAX_OUTSTANDING_COMMANDS) {
841 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", 831 ql_log(ql_log_warn, vha, 0x3014,
842 vha->host_no, index)); 832 "Invalid SCSI command index (%x).\n", index);
843 qla_printk(KERN_WARNING, ha,
844 "Invalid SCSI completion handle %d.\n", index);
845 833
846 if (IS_QLA82XX(ha)) 834 if (IS_QLA82XX(ha))
847 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 835 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -859,10 +847,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
859 sp->cmd->result = DID_OK << 16; 847 sp->cmd->result = DID_OK << 16;
860 qla2x00_sp_compl(ha, sp); 848 qla2x00_sp_compl(ha, sp);
861 } else { 849 } else {
862 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" 850 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
863 " handle(0x%x)\n", vha->host_no, req->id, index));
864 qla_printk(KERN_WARNING, ha,
865 "Invalid ISP SCSI completion handle\n");
866 851
867 if (IS_QLA82XX(ha)) 852 if (IS_QLA82XX(ha))
868 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 853 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -882,8 +867,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
882 867
883 index = LSW(pkt->handle); 868 index = LSW(pkt->handle);
884 if (index >= MAX_OUTSTANDING_COMMANDS) { 869 if (index >= MAX_OUTSTANDING_COMMANDS) {
885 qla_printk(KERN_WARNING, ha, 870 ql_log(ql_log_warn, vha, 0x5031,
886 "%s: Invalid completion handle (%x).\n", func, index); 871 "Invalid command index (%x).\n", index);
887 if (IS_QLA82XX(ha)) 872 if (IS_QLA82XX(ha))
888 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 873 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
889 else 874 else
@@ -892,15 +877,13 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
892 } 877 }
893 sp = req->outstanding_cmds[index]; 878 sp = req->outstanding_cmds[index];
894 if (!sp) { 879 if (!sp) {
895 qla_printk(KERN_WARNING, ha, 880 ql_log(ql_log_warn, vha, 0x5032,
896 "%s: Invalid completion handle (%x) -- timed-out.\n", func, 881 "Invalid completion handle (%x) -- timed-out.\n", index);
897 index);
898 return sp; 882 return sp;
899 } 883 }
900 if (sp->handle != index) { 884 if (sp->handle != index) {
901 qla_printk(KERN_WARNING, ha, 885 ql_log(ql_log_warn, vha, 0x5033,
902 "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle, 886 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
903 index);
904 return NULL; 887 return NULL;
905 } 888 }
906 889
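The completion handlers above (message ids 0x3014/0x3016 and 0x5031-0x5033) share one lookup pattern: validate the handle index against MAX_OUTSTANDING_COMMANDS, fetch the srb from req->outstanding_cmds[], and cross-check sp->handle before trusting it; an out-of-range index additionally schedules an ISP abort (or FCOE_CTX_RESET_NEEDED on ISP82xx). The sketch below shows only that pattern, with simplified stand-in types rather than the driver's real structures.

/* Simplified, standalone sketch of the handle-validation pattern used by
 * the qla_isr.c completion handlers above; types are stand-ins. */
#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 1024

struct srb { unsigned int handle; };
struct req_que { struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; };

static struct srb *get_sp_from_handle(struct req_que *req, unsigned int pkt_handle)
{
        unsigned int index = pkt_handle & 0xffff;      /* LSW(pkt->handle) */
        struct srb *sp;

        if (index >= MAX_OUTSTANDING_COMMANDS) {
                printf("warn 0x5031: Invalid command index (%x).\n", index);
                return NULL;   /* the driver also schedules an ISP/FCoE reset here */
        }
        sp = req->outstanding_cmds[index];
        if (!sp) {
                printf("warn 0x5032: Invalid completion handle (%x) -- timed-out.\n", index);
                return NULL;
        }
        if (sp->handle != index) {
                printf("warn 0x5033: SRB handle (%x) mismatch %x.\n", sp->handle, index);
                return NULL;
        }
        return sp;
}

int main(void)
{
        static struct req_que req;
        static struct srb cmd = { .handle = 5 };

        req.outstanding_cmds[5] = &cmd;
        return get_sp_from_handle(&req, 5) ? 0 : 1;
}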
@@ -937,17 +920,17 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
937 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 920 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
938 QLA_LOGIO_LOGIN_RETRIED : 0; 921 QLA_LOGIO_LOGIN_RETRIED : 0;
939 if (mbx->entry_status) { 922 if (mbx->entry_status) {
940 DEBUG2(printk(KERN_WARNING 923 ql_dbg(ql_dbg_async, vha, 0x5043,
941 "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x " 924 "Async-%s error entry - portid=%02x%02x%02x "
942 "entry-status=%x status=%x state-flag=%x " 925 "entry-status=%x status=%x state-flag=%x "
943 "status-flags=%x.\n", 926 "status-flags=%x.\n",
944 fcport->vha->host_no, sp->handle, type, 927 type, fcport->d_id.b.domain, fcport->d_id.b.area,
945 fcport->d_id.b.domain, fcport->d_id.b.area,
946 fcport->d_id.b.al_pa, mbx->entry_status, 928 fcport->d_id.b.al_pa, mbx->entry_status,
947 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 929 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
948 le16_to_cpu(mbx->status_flags))); 930 le16_to_cpu(mbx->status_flags));
949 931
950 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx))); 932 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
933 (uint8_t *)mbx, sizeof(*mbx));
951 934
952 goto logio_done; 935 goto logio_done;
953 } 936 }
@@ -957,12 +940,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
957 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 940 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
958 status = 0; 941 status = 0;
959 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 942 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
960 DEBUG2(printk(KERN_DEBUG 943 ql_dbg(ql_dbg_async, vha, 0x5045,
961 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " 944 "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
962 "mbx1=%x.\n", 945 type, fcport->d_id.b.domain, fcport->d_id.b.area,
963 fcport->vha->host_no, sp->handle, type, 946 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
964 fcport->d_id.b.domain, fcport->d_id.b.area,
965 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
966 947
967 data[0] = MBS_COMMAND_COMPLETE; 948 data[0] = MBS_COMMAND_COMPLETE;
968 if (ctx->type == SRB_LOGIN_CMD) { 949 if (ctx->type == SRB_LOGIN_CMD) {
@@ -987,14 +968,14 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
987 break; 968 break;
988 } 969 }
989 970
990 DEBUG2(printk(KERN_WARNING 971 ql_log(ql_log_warn, vha, 0x5046,
991 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x " 972 "Async-%s failed - portid=%02x%02x%02x status=%x "
992 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", 973 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
993 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, 974 type, fcport->d_id.b.domain,
994 fcport->d_id.b.area, fcport->d_id.b.al_pa, status, 975 fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
995 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 976 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
996 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 977 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
997 le16_to_cpu(mbx->mb7))); 978 le16_to_cpu(mbx->mb7));
998 979
999logio_done: 980logio_done:
1000 lio->done(sp); 981 lio->done(sp);
@@ -1025,9 +1006,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1025 type = "ct pass-through"; 1006 type = "ct pass-through";
1026 break; 1007 break;
1027 default: 1008 default:
1028 qla_printk(KERN_WARNING, ha, 1009 ql_log(ql_log_warn, vha, 0x5047,
1029 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, 1010 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1030 sp_bsg->type);
1031 return; 1011 return;
1032 } 1012 }
1033 1013
@@ -1045,20 +1025,20 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1045 bsg_job->reply->reply_payload_rcv_len = 1025 bsg_job->reply->reply_payload_rcv_len =
1046 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1026 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1047 1027
1048 DEBUG2(qla_printk(KERN_WARNING, ha, 1028 ql_log(ql_log_warn, vha, 0x5048,
1049 "scsi(%ld): CT pass-through-%s error " 1029 "CT pass-through-%s error "
1050 "comp_status-status=0x%x total_byte = 0x%x.\n", 1030 "comp_status-status=0x%x total_byte = 0x%x.\n",
1051 vha->host_no, type, comp_status, 1031 type, comp_status,
1052 bsg_job->reply->reply_payload_rcv_len)); 1032 bsg_job->reply->reply_payload_rcv_len);
1053 } else { 1033 } else {
1054 DEBUG2(qla_printk(KERN_WARNING, ha, 1034 ql_log(ql_log_warn, vha, 0x5049,
1055 "scsi(%ld): CT pass-through-%s error " 1035 "CT pass-through-%s error "
1056 "comp_status-status=0x%x.\n", 1036 "comp_status-status=0x%x.\n", type, comp_status);
1057 vha->host_no, type, comp_status));
1058 bsg_job->reply->result = DID_ERROR << 16; 1037 bsg_job->reply->result = DID_ERROR << 16;
1059 bsg_job->reply->reply_payload_rcv_len = 0; 1038 bsg_job->reply->reply_payload_rcv_len = 0;
1060 } 1039 }
1061 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); 1040 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
1041 (uint8_t *)pkt, sizeof(*pkt));
1062 } else { 1042 } else {
1063 bsg_job->reply->result = DID_OK << 16; 1043 bsg_job->reply->result = DID_OK << 16;
1064 bsg_job->reply->reply_payload_rcv_len = 1044 bsg_job->reply->reply_payload_rcv_len =
@@ -1110,9 +1090,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1110 type = "ct pass-through"; 1090 type = "ct pass-through";
1111 break; 1091 break;
1112 default: 1092 default:
1113 qla_printk(KERN_WARNING, ha, 1093 ql_log(ql_log_warn, vha, 0x503e,
1114 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, 1094 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1115 sp_bsg->type);
1116 return; 1095 return;
1117 } 1096 }
1118 1097
@@ -1132,27 +1111,31 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1132 bsg_job->reply->reply_payload_rcv_len = 1111 bsg_job->reply->reply_payload_rcv_len =
1133 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1112 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1134 1113
1135 DEBUG2(qla_printk(KERN_WARNING, ha, 1114 ql_log(ql_log_info, vha, 0x503f,
1136 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " 1115 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1137 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1116 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1138 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2], 1117 type, comp_status, fw_status[1], fw_status[2],
1139 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count))); 1118 le16_to_cpu(((struct els_sts_entry_24xx *)
1119 pkt)->total_byte_count));
1140 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1120 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1141 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1121 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1142 } 1122 }
1143 else { 1123 else {
1144 DEBUG2(qla_printk(KERN_WARNING, ha, 1124 ql_log(ql_log_info, vha, 0x5040,
1145 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " 1125 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1146 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1126 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1147 vha->host_no, sp->handle, type, comp_status, 1127 type, comp_status,
1148 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1), 1128 le16_to_cpu(((struct els_sts_entry_24xx *)
1149 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2))); 1129 pkt)->error_subcode_1),
1130 le16_to_cpu(((struct els_sts_entry_24xx *)
1131 pkt)->error_subcode_2));
1150 bsg_job->reply->result = DID_ERROR << 16; 1132 bsg_job->reply->result = DID_ERROR << 16;
1151 bsg_job->reply->reply_payload_rcv_len = 0; 1133 bsg_job->reply->reply_payload_rcv_len = 0;
1152 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1134 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1153 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1135 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1154 } 1136 }
1155 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); 1137 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
1138 (uint8_t *)pkt, sizeof(*pkt));
1156 } 1139 }
1157 else { 1140 else {
1158 bsg_job->reply->result = DID_OK << 16; 1141 bsg_job->reply->result = DID_OK << 16;
@@ -1201,25 +1184,24 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1201 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1184 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1202 QLA_LOGIO_LOGIN_RETRIED : 0; 1185 QLA_LOGIO_LOGIN_RETRIED : 0;
1203 if (logio->entry_status) { 1186 if (logio->entry_status) {
1204 DEBUG2(printk(KERN_WARNING 1187 ql_log(ql_log_warn, vha, 0x5034,
1205 "scsi(%ld:%x): Async-%s error entry - " 1188 "Async-%s error entry - "
1206 "portid=%02x%02x%02x entry-status=%x.\n", 1189 "portid=%02x%02x%02x entry-status=%x.\n",
1207 fcport->vha->host_no, sp->handle, type, 1190 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1208 fcport->d_id.b.domain, fcport->d_id.b.area, 1191 fcport->d_id.b.al_pa, logio->entry_status);
1209 fcport->d_id.b.al_pa, logio->entry_status)); 1192 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
1210 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio))); 1193 (uint8_t *)logio, sizeof(*logio));
1211 1194
1212 goto logio_done; 1195 goto logio_done;
1213 } 1196 }
1214 1197
1215 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1198 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1216 DEBUG2(printk(KERN_DEBUG 1199 ql_dbg(ql_dbg_async, vha, 0x5036,
1217 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " 1200 "Async-%s complete - portid=%02x%02x%02x "
1218 "iop0=%x.\n", 1201 "iop0=%x.\n",
1219 fcport->vha->host_no, sp->handle, type, 1202 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1220 fcport->d_id.b.domain, fcport->d_id.b.area,
1221 fcport->d_id.b.al_pa, 1203 fcport->d_id.b.al_pa,
1222 le32_to_cpu(logio->io_parameter[0]))); 1204 le32_to_cpu(logio->io_parameter[0]));
1223 1205
1224 data[0] = MBS_COMMAND_COMPLETE; 1206 data[0] = MBS_COMMAND_COMPLETE;
1225 if (ctx->type != SRB_LOGIN_CMD) 1207 if (ctx->type != SRB_LOGIN_CMD)
@@ -1256,14 +1238,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1256 break; 1238 break;
1257 } 1239 }
1258 1240
1259 DEBUG2(printk(KERN_WARNING 1241 ql_dbg(ql_dbg_async, vha, 0x5037,
1260 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x " 1242 "Async-%s failed - portid=%02x%02x%02x comp=%x "
1261 "iop0=%x iop1=%x.\n", 1243 "iop0=%x iop1=%x.\n",
1262 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, 1244 type, fcport->d_id.b.domain,
1263 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1245 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1264 le16_to_cpu(logio->comp_status), 1246 le16_to_cpu(logio->comp_status),
1265 le32_to_cpu(logio->io_parameter[0]), 1247 le32_to_cpu(logio->io_parameter[0]),
1266 le32_to_cpu(logio->io_parameter[1]))); 1248 le32_to_cpu(logio->io_parameter[1]));
1267 1249
1268logio_done: 1250logio_done:
1269 lio->done(sp); 1251 lio->done(sp);
@@ -1292,38 +1274,34 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1292 fcport = sp->fcport; 1274 fcport = sp->fcport;
1293 1275
1294 if (sts->entry_status) { 1276 if (sts->entry_status) {
1295 DEBUG2(printk(KERN_WARNING 1277 ql_log(ql_log_warn, vha, 0x5038,
1296 "scsi(%ld:%x): Async-%s error - entry-status(%x).\n", 1278 "Async-%s error - entry-status(%x).\n",
1297 fcport->vha->host_no, sp->handle, type, 1279 type, sts->entry_status);
1298 sts->entry_status));
1299 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1280 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1300 DEBUG2(printk(KERN_WARNING 1281 ql_log(ql_log_warn, vha, 0x5039,
1301 "scsi(%ld:%x): Async-%s error - completion status(%x).\n", 1282 "Async-%s error - completion status(%x).\n",
1302 fcport->vha->host_no, sp->handle, type, 1283 type, sts->comp_status);
1303 sts->comp_status));
1304 } else if (!(le16_to_cpu(sts->scsi_status) & 1284 } else if (!(le16_to_cpu(sts->scsi_status) &
1305 SS_RESPONSE_INFO_LEN_VALID)) { 1285 SS_RESPONSE_INFO_LEN_VALID)) {
1306 DEBUG2(printk(KERN_WARNING 1286 ql_log(ql_log_warn, vha, 0x503a,
1307 "scsi(%ld:%x): Async-%s error - no response info(%x).\n", 1287 "Async-%s error - no response info(%x).\n",
1308 fcport->vha->host_no, sp->handle, type, 1288 type, sts->scsi_status);
1309 sts->scsi_status));
1310 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1289 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1311 DEBUG2(printk(KERN_WARNING 1290 ql_log(ql_log_warn, vha, 0x503b,
1312 "scsi(%ld:%x): Async-%s error - not enough response(%d).\n", 1291 "Async-%s error - not enough response(%d).\n",
1313 fcport->vha->host_no, sp->handle, type, 1292 type, sts->rsp_data_len);
1314 sts->rsp_data_len));
1315 } else if (sts->data[3]) { 1293 } else if (sts->data[3]) {
1316 DEBUG2(printk(KERN_WARNING 1294 ql_log(ql_log_warn, vha, 0x503c,
1317 "scsi(%ld:%x): Async-%s error - response(%x).\n", 1295 "Async-%s error - response(%x).\n",
1318 fcport->vha->host_no, sp->handle, type, 1296 type, sts->data[3]);
1319 sts->data[3]));
1320 } else { 1297 } else {
1321 error = 0; 1298 error = 0;
1322 } 1299 }
1323 1300
1324 if (error) { 1301 if (error) {
1325 iocb->u.tmf.data = error; 1302 iocb->u.tmf.data = error;
1326 DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts))); 1303 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1304 (uint8_t *)sts, sizeof(*sts));
1327 } 1305 }
1328 1306
1329 iocb->done(sp); 1307 iocb->done(sp);
@@ -1360,8 +1338,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1360 } 1338 }
1361 1339
1362 if (pkt->entry_status != 0) { 1340 if (pkt->entry_status != 0) {
1363 DEBUG3(printk(KERN_INFO 1341 ql_log(ql_log_warn, vha, 0x5035,
1364 "scsi(%ld): Process error entry.\n", vha->host_no)); 1342 "Process error entry.\n");
1365 1343
1366 qla2x00_error_entry(vha, rsp, pkt); 1344 qla2x00_error_entry(vha, rsp, pkt);
1367 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1345 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1399,10 +1377,10 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1399 break; 1377 break;
1400 default: 1378 default:
1401 /* Type Not Supported. */ 1379 /* Type Not Supported. */
1402 DEBUG4(printk(KERN_WARNING 1380 ql_log(ql_log_warn, vha, 0x504a,
1403 "scsi(%ld): Received unknown response pkt type %x " 1381 "Received unknown response pkt type %x "
1404 "entry status=%x.\n", 1382 "entry status=%x.\n",
1405 vha->host_no, pkt->entry_type, pkt->entry_status)); 1383 pkt->entry_type, pkt->entry_status);
1406 break; 1384 break;
1407 } 1385 }
1408 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1386 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1418,6 +1396,7 @@ static inline void
1418qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1396qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1419 uint32_t sense_len, struct rsp_que *rsp) 1397 uint32_t sense_len, struct rsp_que *rsp)
1420{ 1398{
1399 struct scsi_qla_host *vha = sp->fcport->vha;
1421 struct scsi_cmnd *cp = sp->cmd; 1400 struct scsi_cmnd *cp = sp->cmd;
1422 1401
1423 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1402 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
@@ -1435,11 +1414,13 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1435 if (sp->request_sense_length != 0) 1414 if (sp->request_sense_length != 0)
1436 rsp->status_srb = sp; 1415 rsp->status_srb = sp;
1437 1416
1438 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 1417 ql_dbg(ql_dbg_io, vha, 0x301c,
1439 "cmd=%p\n", __func__, sp->fcport->vha->host_no, 1418 "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
1440 cp->device->channel, cp->device->id, cp->device->lun, cp)); 1419 sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
1420 cp->device->lun, cp);
1441 if (sense_len) 1421 if (sense_len)
1442 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); 1422 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1423 cp->sense_buffer, sense_len);
1443} 1424}
1444 1425
1445struct scsi_dif_tuple { 1426struct scsi_dif_tuple {
@@ -1457,6 +1438,7 @@ struct scsi_dif_tuple {
1457static inline void 1438static inline void
1458qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1439qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1459{ 1440{
1441 struct scsi_qla_host *vha = sp->fcport->vha;
1460 struct scsi_cmnd *cmd = sp->cmd; 1442 struct scsi_cmnd *cmd = sp->cmd;
1461 struct scsi_dif_tuple *ep = 1443 struct scsi_dif_tuple *ep =
1462 (struct scsi_dif_tuple *)&sts24->data[20]; 1444 (struct scsi_dif_tuple *)&sts24->data[20];
@@ -1473,15 +1455,15 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1473 e_guard = be16_to_cpu(ep->guard); 1455 e_guard = be16_to_cpu(ep->guard);
1474 a_guard = be16_to_cpu(ap->guard); 1456 a_guard = be16_to_cpu(ap->guard);
1475 1457
1476 DEBUG18(printk(KERN_DEBUG 1458 ql_dbg(ql_dbg_io, vha, 0x3023,
1477 "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24)); 1459 "iocb(s) %p Returned STATUS.\n", sts24);
1478 1460
1479 DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1461 ql_dbg(ql_dbg_io, vha, 0x3024,
1462 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1480 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1463 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1481 " tag=0x%x, act guard=0x%x, exp guard=0x%x\n", 1464 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1482 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1465 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1483 a_app_tag, e_app_tag, a_guard, e_guard)); 1466 a_app_tag, e_app_tag, a_guard, e_guard);
1484
1485 1467
1486 /* check guard */ 1468 /* check guard */
1487 if (e_guard != a_guard) { 1469 if (e_guard != a_guard) {
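The reworked qla2x00_handle_dif_error() messages above (0x3023/0x3024) print the T10 DIF tuple fields -- guard, reference tag and application tag -- as expected (e_*) versus actual (a_*) values taken from the status IOCB, and the hunk ends at the guard comparison. The sketch below is a reduced, standalone version of such a tuple comparison; the field layout and the plain-C helpers are stand-ins, not the driver's struct scsi_dif_tuple or the kernel's byte-order macros.

/* Reduced sketch of a T10 DIF tuple comparison; field order and types
 * are illustrative stand-ins for the fields referenced above. */
#include <stdint.h>
#include <stdio.h>

struct dif_tuple {
        uint16_t guard;        /* CRC of the data block */
        uint16_t app_tag;
        uint32_t ref_tag;      /* typically the low 32 bits of the LBA */
};

/* Returns 0 when the tuples agree, otherwise the first mismatching field. */
static int dif_tuple_check(const struct dif_tuple *expected,
                           const struct dif_tuple *actual)
{
        if (expected->guard != actual->guard) {
                printf("DIF guard error: exp=0x%x act=0x%x\n",
                    expected->guard, actual->guard);
                return 1;
        }
        if (expected->ref_tag != actual->ref_tag) {
                printf("DIF ref tag error: exp=0x%x act=0x%x\n",
                    (unsigned)expected->ref_tag, (unsigned)actual->ref_tag);
                return 2;
        }
        if (expected->app_tag != actual->app_tag) {
                printf("DIF app tag error: exp=0x%x act=0x%x\n",
                    expected->app_tag, actual->app_tag);
                return 3;
        }
        return 0;
}

int main(void)
{
        struct dif_tuple exp = { .guard = 0x1234, .app_tag = 0, .ref_tag = 100 };
        struct dif_tuple act = exp;

        return dif_tuple_check(&exp, &act);    /* 0: tuples agree */
}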
@@ -1569,9 +1551,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1569 sp = NULL; 1551 sp = NULL;
1570 1552
1571 if (sp == NULL) { 1553 if (sp == NULL) {
1572 qla_printk(KERN_WARNING, ha, 1554 ql_log(ql_log_warn, vha, 0x3017,
1573 "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no, 1555 "Invalid status handle (0x%x).\n", sts->handle);
1574 sts->handle);
1575 1556
1576 if (IS_QLA82XX(ha)) 1557 if (IS_QLA82XX(ha))
1577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1558 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1582,9 +1563,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1582 } 1563 }
1583 cp = sp->cmd; 1564 cp = sp->cmd;
1584 if (cp == NULL) { 1565 if (cp == NULL) {
1585 qla_printk(KERN_WARNING, ha, 1566 ql_log(ql_log_warn, vha, 0x3018,
1586 "scsi(%ld): Command already returned (0x%x/%p).\n", 1567 "Command already returned (0x%x/%p).\n",
1587 vha->host_no, sts->handle, sp); 1568 sts->handle, sp);
1588 1569
1589 return; 1570 return;
1590 } 1571 }
@@ -1629,10 +1610,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1629 par_sense_len -= rsp_info_len; 1610 par_sense_len -= rsp_info_len;
1630 } 1611 }
1631 if (rsp_info_len > 3 && rsp_info[3]) { 1612 if (rsp_info_len > 3 && rsp_info[3]) {
1632 DEBUG2(qla_printk(KERN_INFO, ha, 1613 ql_log(ql_log_warn, vha, 0x3019,
1633 "scsi(%ld:%d:%d): FCP I/O protocol failure " 1614 "FCP I/O protocol failure (0x%x/0x%x).\n",
1634 "(0x%x/0x%x).\n", vha->host_no, cp->device->id, 1615 rsp_info_len, rsp_info[3]);
1635 cp->device->lun, rsp_info_len, rsp_info[3]));
1636 1616
1637 cp->result = DID_BUS_BUSY << 16; 1617 cp->result = DID_BUS_BUSY << 16;
1638 goto out; 1618 goto out;
@@ -1661,11 +1641,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1661 if (!lscsi_status && 1641 if (!lscsi_status &&
1662 ((unsigned)(scsi_bufflen(cp) - resid) < 1642 ((unsigned)(scsi_bufflen(cp) - resid) <
1663 cp->underflow)) { 1643 cp->underflow)) {
1664 qla_printk(KERN_INFO, ha, 1644 ql_log(ql_log_warn, vha, 0x301a,
1665 "scsi(%ld:%d:%d): Mid-layer underflow " 1645 "Mid-layer underflow "
1666 "detected (0x%x of 0x%x bytes).\n", 1646 "detected (0x%x of 0x%x bytes).\n",
1667 vha->host_no, cp->device->id, 1647 resid, scsi_bufflen(cp));
1668 cp->device->lun, resid, scsi_bufflen(cp));
1669 1648
1670 cp->result = DID_ERROR << 16; 1649 cp->result = DID_ERROR << 16;
1671 break; 1650 break;
@@ -1674,9 +1653,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1674 cp->result = DID_OK << 16 | lscsi_status; 1653 cp->result = DID_OK << 16 | lscsi_status;
1675 1654
1676 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1655 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1677 DEBUG2(qla_printk(KERN_INFO, ha, 1656 ql_log(ql_log_warn, vha, 0x301b,
1678 "scsi(%ld:%d:%d) QUEUE FULL detected.\n", 1657 "QUEUE FULL detected.\n");
1679 vha->host_no, cp->device->id, cp->device->lun));
1680 break; 1658 break;
1681 } 1659 }
1682 logit = 0; 1660 logit = 0;
@@ -1697,11 +1675,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1697 scsi_set_resid(cp, resid); 1675 scsi_set_resid(cp, resid);
1698 if (scsi_status & SS_RESIDUAL_UNDER) { 1676 if (scsi_status & SS_RESIDUAL_UNDER) {
1699 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1677 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1700 DEBUG2(qla_printk(KERN_INFO, ha, 1678 ql_log(ql_log_warn, vha, 0x301d,
1701 "scsi(%ld:%d:%d) Dropped frame(s) detected " 1679 "Dropped frame(s) detected "
1702 "(0x%x of 0x%x bytes).\n", vha->host_no, 1680 "(0x%x of 0x%x bytes).\n",
1703 cp->device->id, cp->device->lun, resid, 1681 resid, scsi_bufflen(cp));
1704 scsi_bufflen(cp)));
1705 1682
1706 cp->result = DID_ERROR << 16 | lscsi_status; 1683 cp->result = DID_ERROR << 16 | lscsi_status;
1707 break; 1684 break;
@@ -1710,20 +1687,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1710 if (!lscsi_status && 1687 if (!lscsi_status &&
1711 ((unsigned)(scsi_bufflen(cp) - resid) < 1688 ((unsigned)(scsi_bufflen(cp) - resid) <
1712 cp->underflow)) { 1689 cp->underflow)) {
1713 qla_printk(KERN_INFO, ha, 1690 ql_log(ql_log_warn, vha, 0x301e,
1714 "scsi(%ld:%d:%d): Mid-layer underflow " 1691 "Mid-layer underflow "
1715 "detected (0x%x of 0x%x bytes).\n", 1692 "detected (0x%x of 0x%x bytes).\n",
1716 vha->host_no, cp->device->id, 1693 resid, scsi_bufflen(cp));
1717 cp->device->lun, resid, scsi_bufflen(cp));
1718 1694
1719 cp->result = DID_ERROR << 16; 1695 cp->result = DID_ERROR << 16;
1720 break; 1696 break;
1721 } 1697 }
1722 } else { 1698 } else {
1723 DEBUG2(qla_printk(KERN_INFO, ha, 1699 ql_log(ql_log_warn, vha, 0x301f,
1724 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " 1700 "Dropped frame(s) detected (0x%x "
1725 "of 0x%x bytes).\n", vha->host_no, cp->device->id, 1701 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
1726 cp->device->lun, resid, scsi_bufflen(cp)));
1727 1702
1728 cp->result = DID_ERROR << 16 | lscsi_status; 1703 cp->result = DID_ERROR << 16 | lscsi_status;
1729 goto check_scsi_status; 1704 goto check_scsi_status;
@@ -1739,10 +1714,8 @@ check_scsi_status:
1739 */ 1714 */
1740 if (lscsi_status != 0) { 1715 if (lscsi_status != 0) {
1741 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1716 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1742 DEBUG2(qla_printk(KERN_INFO, ha, 1717 ql_log(ql_log_warn, vha, 0x3020,
1743 "scsi(%ld:%d:%d) QUEUE FULL detected.\n", 1718 "QUEUE FULL detected.\n");
1744 vha->host_no, cp->device->id,
1745 cp->device->lun));
1746 logit = 1; 1719 logit = 1;
1747 break; 1720 break;
1748 } 1721 }
@@ -1781,10 +1754,9 @@ check_scsi_status:
1781 break; 1754 break;
1782 } 1755 }
1783 1756
1784 DEBUG2(qla_printk(KERN_INFO, ha, 1757 ql_dbg(ql_dbg_io, vha, 0x3021,
1785 "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n", 1758 "Port down status: port-state=0x%x.\n",
1786 vha->host_no, cp->device->id, cp->device->lun, 1759 atomic_read(&fcport->state));
1787 atomic_read(&fcport->state)));
1788 1760
1789 if (atomic_read(&fcport->state) == FCS_ONLINE) 1761 if (atomic_read(&fcport->state) == FCS_ONLINE)
1790 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1762 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -1804,15 +1776,13 @@ check_scsi_status:
1804 1776
1805out: 1777out:
1806 if (logit) 1778 if (logit)
1807 DEBUG2(qla_printk(KERN_INFO, ha, 1779 ql_dbg(ql_dbg_io, vha, 0x3022,
1808 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " 1780 "FCP command status: 0x%x-0x%x (0x%x) "
1809 "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1781 "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1810 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, 1782 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
1811 cp->device->id, cp->device->lun, comp_status, scsi_status, 1783 comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
1812 cp->result, fcport->d_id.b.domain, fcport->d_id.b.area, 1784 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1813 fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1], 1785 resid_len, fw_resid_len);
1814 cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len,
1815 fw_resid_len));
1816 1786
1817 if (rsp->status_srb == NULL) 1787 if (rsp->status_srb == NULL)
1818 qla2x00_sp_compl(ha, sp); 1788 qla2x00_sp_compl(ha, sp);
@@ -1830,16 +1800,15 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1830{ 1800{
1831 uint8_t sense_sz = 0; 1801 uint8_t sense_sz = 0;
1832 struct qla_hw_data *ha = rsp->hw; 1802 struct qla_hw_data *ha = rsp->hw;
1803 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1833 srb_t *sp = rsp->status_srb; 1804 srb_t *sp = rsp->status_srb;
1834 struct scsi_cmnd *cp; 1805 struct scsi_cmnd *cp;
1835 1806
1836 if (sp != NULL && sp->request_sense_length != 0) { 1807 if (sp != NULL && sp->request_sense_length != 0) {
1837 cp = sp->cmd; 1808 cp = sp->cmd;
1838 if (cp == NULL) { 1809 if (cp == NULL) {
1839 DEBUG2(printk("%s(): Cmd already returned back to OS " 1810 ql_log(ql_log_warn, vha, 0x3025,
1840 "sp=%p.\n", __func__, sp)); 1811 "cmd is NULL: already returned to OS (sp=%p).\n",
1841 qla_printk(KERN_INFO, ha,
1842 "cmd is NULL: already returned to OS (sp=%p)\n",
1843 sp); 1812 sp);
1844 1813
1845 rsp->status_srb = NULL; 1814 rsp->status_srb = NULL;
@@ -1856,7 +1825,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1856 if (IS_FWI2_CAPABLE(ha)) 1825 if (IS_FWI2_CAPABLE(ha))
1857 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 1826 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1858 memcpy(sp->request_sense_ptr, pkt->data, sense_sz); 1827 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1859 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz)); 1828 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1829 sp->request_sense_ptr, sense_sz);
1860 1830
1861 sp->request_sense_ptr += sense_sz; 1831 sp->request_sense_ptr += sense_sz;
1862 sp->request_sense_length -= sense_sz; 1832 sp->request_sense_length -= sense_sz;
@@ -1882,21 +1852,25 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1882 uint32_t handle = LSW(pkt->handle); 1852 uint32_t handle = LSW(pkt->handle);
1883 uint16_t que = MSW(pkt->handle); 1853 uint16_t que = MSW(pkt->handle);
1884 struct req_que *req = ha->req_q_map[que]; 1854 struct req_que *req = ha->req_q_map[que];
1885#if defined(QL_DEBUG_LEVEL_2) 1855
1886 if (pkt->entry_status & RF_INV_E_ORDER) 1856 if (pkt->entry_status & RF_INV_E_ORDER)
1887 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1857 ql_dbg(ql_dbg_async, vha, 0x502a,
1858 "Invalid Entry Order.\n");
1888 else if (pkt->entry_status & RF_INV_E_COUNT) 1859 else if (pkt->entry_status & RF_INV_E_COUNT)
1889 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__); 1860 ql_dbg(ql_dbg_async, vha, 0x502b,
1861 "Invalid Entry Count.\n");
1890 else if (pkt->entry_status & RF_INV_E_PARAM) 1862 else if (pkt->entry_status & RF_INV_E_PARAM)
1891 qla_printk(KERN_ERR, ha, 1863 ql_dbg(ql_dbg_async, vha, 0x502c,
1892 "%s: Invalid Entry Parameter\n", __func__); 1864 "Invalid Entry Parameter.\n");
1893 else if (pkt->entry_status & RF_INV_E_TYPE) 1865 else if (pkt->entry_status & RF_INV_E_TYPE)
1894 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__); 1866 ql_dbg(ql_dbg_async, vha, 0x502d,
1867 "Invalid Entry Type.\n");
1895 else if (pkt->entry_status & RF_BUSY) 1868 else if (pkt->entry_status & RF_BUSY)
1896 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__); 1869 ql_dbg(ql_dbg_async, vha, 0x502e,
1870 "Busy.\n");
1897 else 1871 else
1898 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__); 1872 ql_dbg(ql_dbg_async, vha, 0x502f,
1899#endif 1873 "UNKNOWN flag error.\n");
1900 1874
1901 /* Validate handle. */ 1875 /* Validate handle. */
1902 if (handle < MAX_OUTSTANDING_COMMANDS) 1876 if (handle < MAX_OUTSTANDING_COMMANDS)
@@ -1923,10 +1897,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1923 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1897 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1924 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 1898 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1925 || pkt->entry_type == COMMAND_TYPE_6) { 1899 || pkt->entry_type == COMMAND_TYPE_6) {
1926 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1900 ql_log(ql_log_warn, vha, 0x5030,
1927 vha->host_no)); 1901 "Error entry - invalid handle.\n");
1928 qla_printk(KERN_WARNING, ha,
1929 "Error entry - invalid handle\n");
1930 1902
1931 if (IS_QLA82XX(ha)) 1903 if (IS_QLA82XX(ha))
1932 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1904 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1960,11 +1932,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1960 } 1932 }
1961 1933
1962 if (ha->mcp) { 1934 if (ha->mcp) {
1963 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 1935 ql_dbg(ql_dbg_async, vha, 0x504d,
1964 __func__, vha->host_no, ha->mcp->mb[0])); 1936 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1965 } else { 1937 } else {
1966 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 1938 ql_dbg(ql_dbg_async, vha, 0x504e,
1967 __func__, vha->host_no)); 1939 "MBX pointer ERROR.\n");
1968 } 1940 }
1969} 1941}
1970 1942
@@ -1993,8 +1965,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1993 } 1965 }
1994 1966
1995 if (pkt->entry_status != 0) { 1967 if (pkt->entry_status != 0) {
1996 DEBUG3(printk(KERN_INFO 1968 ql_dbg(ql_dbg_async, vha, 0x5029,
1997 "scsi(%ld): Process error entry.\n", vha->host_no)); 1969 "Process error entry.\n");
1998 1970
1999 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 1971 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2000 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1972 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2030,10 +2002,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2030 break; 2002 break;
2031 default: 2003 default:
2032 /* Type Not Supported. */ 2004 /* Type Not Supported. */
2033 DEBUG4(printk(KERN_WARNING 2005 ql_dbg(ql_dbg_async, vha, 0x5042,
2034 "scsi(%ld): Received unknown response pkt type %x " 2006 "Received unknown response pkt type %x "
2035 "entry status=%x.\n", 2007 "entry status=%x.\n",
2036 vha->host_no, pkt->entry_type, pkt->entry_status)); 2008 pkt->entry_type, pkt->entry_status);
2037 break; 2009 break;
2038 } 2010 }
2039 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2011 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2088,7 +2060,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2088 2060
2089next_test: 2061next_test:
2090 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) 2062 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2091 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n"); 2063 ql_log(ql_log_info, vha, 0x504c,
2064 "Additional code -- 0x55AA.\n");
2092 2065
2093done: 2066done:
2094 WRT_REG_DWORD(&reg->iobase_window, 0x0000); 2067 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
@@ -2121,7 +2094,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
2121 rsp = (struct rsp_que *) dev_id; 2094 rsp = (struct rsp_que *) dev_id;
2122 if (!rsp) { 2095 if (!rsp) {
2123 printk(KERN_INFO 2096 printk(KERN_INFO
2124 "%s(): NULL response queue pointer\n", __func__); 2097 "%s(): NULL response queue pointer.\n", __func__);
2125 return IRQ_NONE; 2098 return IRQ_NONE;
2126 } 2099 }
2127 2100
@@ -2142,8 +2115,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
2142 2115
2143 hccr = RD_REG_DWORD(&reg->hccr); 2116 hccr = RD_REG_DWORD(&reg->hccr);
2144 2117
2145 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 2118 ql_log(ql_log_warn, vha, 0x504b,
2146 "Dumping firmware!\n", hccr); 2119 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2120 hccr);
2147 2121
2148 qla2xxx_check_risc_status(vha); 2122 qla2xxx_check_risc_status(vha);
2149 2123
@@ -2174,9 +2148,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
2174 qla24xx_process_response_queue(vha, rsp); 2148 qla24xx_process_response_queue(vha, rsp);
2175 break; 2149 break;
2176 default: 2150 default:
2177 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2151 ql_dbg(ql_dbg_async, vha, 0x504f,
2178 "(%d).\n", 2152 "Unrecognized interrupt type (%d).\n", stat * 0xff);
2179 vha->host_no, stat & 0xff));
2180 break; 2153 break;
2181 } 2154 }
2182 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2155 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2205,7 +2178,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2205 rsp = (struct rsp_que *) dev_id; 2178 rsp = (struct rsp_que *) dev_id;
2206 if (!rsp) { 2179 if (!rsp) {
2207 printk(KERN_INFO 2180 printk(KERN_INFO
2208 "%s(): NULL response queue pointer\n", __func__); 2181 "%s(): NULL response queue pointer.\n", __func__);
2209 return IRQ_NONE; 2182 return IRQ_NONE;
2210 } 2183 }
2211 ha = rsp->hw; 2184 ha = rsp->hw;
@@ -2235,7 +2208,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2235 rsp = (struct rsp_que *) dev_id; 2208 rsp = (struct rsp_que *) dev_id;
2236 if (!rsp) { 2209 if (!rsp) {
2237 printk(KERN_INFO 2210 printk(KERN_INFO
2238 "%s(): NULL response queue pointer\n", __func__); 2211 "%s(): NULL response queue pointer.\n", __func__);
2239 return IRQ_NONE; 2212 return IRQ_NONE;
2240 } 2213 }
2241 ha = rsp->hw; 2214 ha = rsp->hw;
@@ -2268,8 +2241,8 @@ qla24xx_msix_default(int irq, void *dev_id)
2268 2241
2269 rsp = (struct rsp_que *) dev_id; 2242 rsp = (struct rsp_que *) dev_id;
2270 if (!rsp) { 2243 if (!rsp) {
2271 DEBUG(printk( 2244 printk(KERN_INFO
2272 "%s(): NULL response queue pointer\n", __func__)); 2245 "%s(): NULL response queue pointer.\n", __func__);
2273 return IRQ_NONE; 2246 return IRQ_NONE;
2274 } 2247 }
2275 ha = rsp->hw; 2248 ha = rsp->hw;
@@ -2286,8 +2259,9 @@ qla24xx_msix_default(int irq, void *dev_id)
2286 2259
2287 hccr = RD_REG_DWORD(&reg->hccr); 2260 hccr = RD_REG_DWORD(&reg->hccr);
2288 2261
2289 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 2262 ql_log(ql_log_info, vha, 0x5050,
2290 "Dumping firmware!\n", hccr); 2263 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2264 hccr);
2291 2265
2292 qla2xxx_check_risc_status(vha); 2266 qla2xxx_check_risc_status(vha);
2293 2267
@@ -2318,9 +2292,8 @@ qla24xx_msix_default(int irq, void *dev_id)
2318 qla24xx_process_response_queue(vha, rsp); 2292 qla24xx_process_response_queue(vha, rsp);
2319 break; 2293 break;
2320 default: 2294 default:
2321 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2295 ql_dbg(ql_dbg_async, vha, 0x5051,
2322 "(%d).\n", 2296 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2323 vha->host_no, stat & 0xff));
2324 break; 2297 break;
2325 } 2298 }
2326 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2299 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2358,6 +2331,7 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
2358{ 2331{
2359 int i; 2332 int i;
2360 struct qla_msix_entry *qentry; 2333 struct qla_msix_entry *qentry;
2334 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2361 2335
2362 for (i = 0; i < ha->msix_count; i++) { 2336 for (i = 0; i < ha->msix_count; i++) {
2363 qentry = &ha->msix_entries[i]; 2337 qentry = &ha->msix_entries[i];
@@ -2368,6 +2342,8 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
2368 kfree(ha->msix_entries); 2342 kfree(ha->msix_entries);
2369 ha->msix_entries = NULL; 2343 ha->msix_entries = NULL;
2370 ha->flags.msix_enabled = 0; 2344 ha->flags.msix_enabled = 0;
2345 ql_dbg(ql_dbg_init, vha, 0x0042,
2346 "Disabled the MSI.\n");
2371} 2347}
2372 2348
2373static int 2349static int
@@ -2377,11 +2353,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2377 int i, ret; 2353 int i, ret;
2378 struct msix_entry *entries; 2354 struct msix_entry *entries;
2379 struct qla_msix_entry *qentry; 2355 struct qla_msix_entry *qentry;
2356 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2380 2357
2381 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2358 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2382 GFP_KERNEL); 2359 GFP_KERNEL);
2383 if (!entries) 2360 if (!entries) {
2361 ql_log(ql_log_warn, vha, 0x00bc,
2362 "Failed to allocate memory for msix_entry.\n");
2384 return -ENOMEM; 2363 return -ENOMEM;
2364 }
2385 2365
2386 for (i = 0; i < ha->msix_count; i++) 2366 for (i = 0; i < ha->msix_count; i++)
2387 entries[i].entry = i; 2367 entries[i].entry = i;
@@ -2391,16 +2371,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2391 if (ret < MIN_MSIX_COUNT) 2371 if (ret < MIN_MSIX_COUNT)
2392 goto msix_failed; 2372 goto msix_failed;
2393 2373
2394 qla_printk(KERN_WARNING, ha, 2374 ql_log(ql_log_warn, vha, 0x00c6,
2395 "MSI-X: Failed to enable support -- %d/%d\n" 2375 "MSI-X: Failed to enable support "
2396 " Retry with %d vectors\n", ha->msix_count, ret, ret); 2376 "-- %d/%d\n Retry with %d vectors.\n",
2377 ha->msix_count, ret, ret);
2397 ha->msix_count = ret; 2378 ha->msix_count = ret;
2398 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2379 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2399 if (ret) { 2380 if (ret) {
2400msix_failed: 2381msix_failed:
2401 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" 2382 ql_log(ql_log_fatal, vha, 0x00c7,
2402 " support, giving up -- %d/%d\n", 2383 "MSI-X: Failed to enable support, "
2403 ha->msix_count, ret); 2384 "giving up -- %d/%d.\n",
2385 ha->msix_count, ret);
2404 goto msix_out; 2386 goto msix_out;
2405 } 2387 }
2406 ha->max_rsp_queues = ha->msix_count - 1; 2388 ha->max_rsp_queues = ha->msix_count - 1;
@@ -2408,6 +2390,8 @@ msix_failed:
2408 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2390 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2409 ha->msix_count, GFP_KERNEL); 2391 ha->msix_count, GFP_KERNEL);
2410 if (!ha->msix_entries) { 2392 if (!ha->msix_entries) {
2393 ql_log(ql_log_fatal, vha, 0x00c8,
2394 "Failed to allocate memory for ha->msix_entries.\n");
2411 ret = -ENOMEM; 2395 ret = -ENOMEM;
2412 goto msix_out; 2396 goto msix_out;
2413 } 2397 }
@@ -2434,9 +2418,9 @@ msix_failed:
2434 0, msix_entries[i].name, rsp); 2418 0, msix_entries[i].name, rsp);
2435 } 2419 }
2436 if (ret) { 2420 if (ret) {
2437 qla_printk(KERN_WARNING, ha, 2421 ql_log(ql_log_fatal, vha, 0x00cb,
2438 "MSI-X: Unable to register handler -- %x/%d.\n", 2422 "MSI-X: unable to register handler -- %x/%d.\n",
2439 qentry->vector, ret); 2423 qentry->vector, ret);
2440 qla24xx_disable_msix(ha); 2424 qla24xx_disable_msix(ha);
2441 ha->mqenable = 0; 2425 ha->mqenable = 0;
2442 goto msix_out; 2426 goto msix_out;
@@ -2449,6 +2433,12 @@ msix_failed:
2449 /* Enable MSI-X vector for response queue update for queue 0 */ 2433 /* Enable MSI-X vector for response queue update for queue 0 */
2450 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2434 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2451 ha->mqenable = 1; 2435 ha->mqenable = 1;
2436 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2437 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2438 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2439 ql_dbg(ql_dbg_init, vha, 0x0055,
2440 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2441 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2452 2442
2453msix_out: 2443msix_out:
2454 kfree(entries); 2444 kfree(entries);
@@ -2460,6 +2450,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2460{ 2450{
2461 int ret; 2451 int ret;
2462 device_reg_t __iomem *reg = ha->iobase; 2452 device_reg_t __iomem *reg = ha->iobase;
2453 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2463 2454
2464 /* If possible, enable MSI-X. */ 2455 /* If possible, enable MSI-X. */
2465 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2456 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
@@ -2470,30 +2461,30 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2470 (ha->pdev->subsystem_device == 0x7040 || 2461 (ha->pdev->subsystem_device == 0x7040 ||
2471 ha->pdev->subsystem_device == 0x7041 || 2462 ha->pdev->subsystem_device == 0x7041 ||
2472 ha->pdev->subsystem_device == 0x1705)) { 2463 ha->pdev->subsystem_device == 0x1705)) {
2473 DEBUG2(qla_printk(KERN_WARNING, ha, 2464 ql_log(ql_log_warn, vha, 0x0034,
2474 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n", 2465 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2475 ha->pdev->subsystem_vendor, 2466 ha->pdev->subsystem_vendor,
2476 ha->pdev->subsystem_device)); 2467 ha->pdev->subsystem_device);
2477 goto skip_msi; 2468 goto skip_msi;
2478 } 2469 }
2479 2470
2480 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 2471 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2481 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 2472 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2482 DEBUG2(qla_printk(KERN_WARNING, ha, 2473 ql_log(ql_log_warn, vha, 0x0035,
2483 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 2474 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2484 ha->pdev->revision, ha->fw_attributes)); 2475 ha->pdev->revision, ha->fw_attributes);
2485 goto skip_msix; 2476 goto skip_msix;
2486 } 2477 }
2487 2478
2488 ret = qla24xx_enable_msix(ha, rsp); 2479 ret = qla24xx_enable_msix(ha, rsp);
2489 if (!ret) { 2480 if (!ret) {
2490 DEBUG2(qla_printk(KERN_INFO, ha, 2481 ql_dbg(ql_dbg_init, vha, 0x0036,
2491 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 2482 "MSI-X: Enabled (0x%X, 0x%X).\n",
2492 ha->fw_attributes)); 2483 ha->chip_revision, ha->fw_attributes);
2493 goto clear_risc_ints; 2484 goto clear_risc_ints;
2494 } 2485 }
2495 qla_printk(KERN_WARNING, ha, 2486 ql_log(ql_log_info, vha, 0x0037,
2496 "MSI-X: Falling back-to MSI mode -- %d.\n", ret); 2487 "MSI-X Falling back-to MSI mode -%d.\n", ret);
2497skip_msix: 2488skip_msix:
2498 2489
2499 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2490 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2502,18 +2493,19 @@ skip_msix:
2502 2493
2503 ret = pci_enable_msi(ha->pdev); 2494 ret = pci_enable_msi(ha->pdev);
2504 if (!ret) { 2495 if (!ret) {
2505 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n")); 2496 ql_dbg(ql_dbg_init, vha, 0x0038,
2497 "MSI: Enabled.\n");
2506 ha->flags.msi_enabled = 1; 2498 ha->flags.msi_enabled = 1;
2507 } else 2499 } else
2508 qla_printk(KERN_WARNING, ha, 2500 ql_log(ql_log_warn, vha, 0x0039,
2509 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 2501 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2510skip_msi: 2502skip_msi:
2511 2503
2512 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2504 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2513 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 2505 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2514 QLA2XXX_DRIVER_NAME, rsp); 2506 QLA2XXX_DRIVER_NAME, rsp);
2515 if (ret) { 2507 if (ret) {
2516 qla_printk(KERN_WARNING, ha, 2508 ql_log(ql_log_warn, vha, 0x003a,
2517 "Failed to reserve interrupt %d already in use.\n", 2509 "Failed to reserve interrupt %d already in use.\n",
2518 ha->pdev->irq); 2510 ha->pdev->irq);
2519 goto fail; 2511 goto fail;
@@ -2563,13 +2555,14 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2563 struct qla_hw_data *ha = rsp->hw; 2555 struct qla_hw_data *ha = rsp->hw;
2564 struct qla_init_msix_entry *intr = &msix_entries[2]; 2556 struct qla_init_msix_entry *intr = &msix_entries[2];
2565 struct qla_msix_entry *msix = rsp->msix; 2557 struct qla_msix_entry *msix = rsp->msix;
2558 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2566 int ret; 2559 int ret;
2567 2560
2568 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 2561 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2569 if (ret) { 2562 if (ret) {
2570 qla_printk(KERN_WARNING, ha, 2563 ql_log(ql_log_fatal, vha, 0x00e6,
2571 "MSI-X: Unable to register handler -- %x/%d.\n", 2564 "MSI-X: Unable to register handler -- %x/%d.\n",
2572 msix->vector, ret); 2565 msix->vector, ret);
2573 return ret; 2566 return ret;
2574 } 2567 }
2575 msix->have_irq = 1; 2568 msix->have_irq = 1;
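
Note on the logging convention used throughout these hunks: the old DEBUGn()/qla_printk() calls are replaced by ql_dbg(), which takes a debug-category mask (ql_dbg_async, ql_dbg_mbx, ql_dbg_init, ql_dbg_multiq, ...) plus a unique per-callsite message id, and by ql_log(), which takes a severity (ql_log_fatal/ql_log_warn/ql_log_info). The sketch below is a standalone, user-space illustration of that mask-plus-id idea only -- the QL_DBG_* values, the sketch_ql_dbg() name and the filtering variable are assumptions made for the example, not the driver's implementation:

    /* Standalone sketch -- not driver code.  Mask values and names are assumed. */
    #include <stdarg.h>
    #include <stdio.h>

    #define QL_DBG_MBX    0x0010u   /* assumed bit for the "mbx" category */
    #define QL_DBG_ASYNC  0x0008u   /* assumed bit for the "async" category */

    /* Stand-in for a module parameter that selects which categories print. */
    static unsigned int enabled_dbg_mask = QL_DBG_MBX;

    static void sketch_ql_dbg(unsigned int mask, unsigned int id, const char *fmt, ...)
    {
            va_list ap;

            if (!(mask & enabled_dbg_mask))
                    return;                         /* category disabled: stay silent */
            printf("qla2xxx [%04x]: ", id);         /* unique per-callsite message id */
            va_start(ap, fmt);
            vprintf(fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            sketch_ql_dbg(QL_DBG_MBX, 0x1006, "Prepare to issue mbox cmd=0x%x.\n", 0x54);
            sketch_ql_dbg(QL_DBG_ASYNC, 0x5029, "Process error entry.\n"); /* filtered */
            return 0;
    }

The per-callsite ids (0x1006, 0x5029, ...) are what let a production log line be traced back to a single statement in the driver, which is why every converted message above carries one.
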
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index c26f0acdfecc..f7604ea1af83 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,14 +46,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
46 struct qla_hw_data *ha = vha->hw; 46 struct qla_hw_data *ha = vha->hw;
47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
48 48
49 if (ha->pdev->error_state > pci_channel_io_frozen) 49 ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
50
51 if (ha->pdev->error_state > pci_channel_io_frozen) {
52 ql_log(ql_log_warn, base_vha, 0x1001,
53 "error_state is greater than pci_channel_io_frozen, "
54 "exiting.\n");
50 return QLA_FUNCTION_TIMEOUT; 55 return QLA_FUNCTION_TIMEOUT;
56 }
51 57
52 if (vha->device_flags & DFLG_DEV_FAILED) { 58 if (vha->device_flags & DFLG_DEV_FAILED) {
53 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 59 ql_log(ql_log_warn, base_vha, 0x1002,
54 "%s(%ld): Device in failed state, " 60 "Device in failed state, exiting.\n");
55 "timeout MBX Exiting.\n",
56 __func__, base_vha->host_no));
57 return QLA_FUNCTION_TIMEOUT; 61 return QLA_FUNCTION_TIMEOUT;
58 } 62 }
59 63
@@ -63,17 +67,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
63 rval = QLA_SUCCESS; 67 rval = QLA_SUCCESS;
64 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 68 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
65 69
66 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
67 70
68 if (ha->flags.pci_channel_io_perm_failure) { 71 if (ha->flags.pci_channel_io_perm_failure) {
69 DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX " 72 ql_log(ql_log_warn, base_vha, 0x1003,
70 "Exiting.\n", __func__, vha->host_no)); 73 "Perm failure on EEH timeout MBX, exiting.\n");
71 return QLA_FUNCTION_TIMEOUT; 74 return QLA_FUNCTION_TIMEOUT;
72 } 75 }
73 76
74 if (ha->flags.isp82xx_fw_hung) { 77 if (ha->flags.isp82xx_fw_hung) {
75 /* Setting Link-Down error */ 78 /* Setting Link-Down error */
76 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 79 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
80 ql_log(ql_log_warn, base_vha, 0x1004,
81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
77 rval = QLA_FUNCTION_FAILED; 82 rval = QLA_FUNCTION_FAILED;
78 goto premature_exit; 83 goto premature_exit;
79 } 84 }
@@ -85,8 +90,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
85 */ 90 */
86 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { 91 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
87 /* Timeout occurred. Return error. */ 92 /* Timeout occurred. Return error. */
88 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 93 ql_log(ql_log_warn, base_vha, 0x1005,
89 "Exiting.\n", __func__, base_vha->host_no)); 94 "Cmd access timeout, Exiting.\n");
90 return QLA_FUNCTION_TIMEOUT; 95 return QLA_FUNCTION_TIMEOUT;
91 } 96 }
92 97
@@ -94,8 +99,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
94 /* Save mailbox command for debug */ 99 /* Save mailbox command for debug */
95 ha->mcp = mcp; 100 ha->mcp = mcp;
96 101
97 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 102 ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
98 base_vha->host_no, mcp->mb[0])); 103 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
99 104
100 spin_lock_irqsave(&ha->hardware_lock, flags); 105 spin_lock_irqsave(&ha->hardware_lock, flags);
101 106
@@ -123,27 +128,30 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
123 iptr++; 128 iptr++;
124 } 129 }
125 130
126#if defined(QL_DEBUG_LEVEL_1) 131 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
127 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n", 132 "Loaded MBX registers (displayed in bytes) =.\n");
128 __func__, base_vha->host_no); 133 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
129 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16); 134 (uint8_t *)mcp->mb, 16);
130 printk("\n"); 135 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
131 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16); 136 ".\n");
132 printk("\n"); 137 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
133 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8); 138 ((uint8_t *)mcp->mb + 0x10), 16);
134 printk("\n"); 139 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
135 printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no, 140 ".\n");
136 optr); 141 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
137 qla2x00_dump_regs(base_vha); 142 ((uint8_t *)mcp->mb + 0x20), 8);
138#endif 143 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
144 "I/O Address = %p.\n", optr);
145 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
139 146
140 /* Issue set host interrupt command to send cmd out. */ 147 /* Issue set host interrupt command to send cmd out. */
141 ha->flags.mbox_int = 0; 148 ha->flags.mbox_int = 0;
142 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 149 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
143 150
144 /* Unlock mbx registers and wait for interrupt */ 151 /* Unlock mbx registers and wait for interrupt */
145 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " 152 ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
146 "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies)); 153 "Going to unlock irq & waiting for interrupts. "
154 "jiffies=%lx.\n", jiffies);
147 155
148 /* Wait for mbx cmd completion until timeout */ 156 /* Wait for mbx cmd completion until timeout */
149 157
@@ -155,9 +163,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
155 HINT_MBX_INT_PENDING) { 163 HINT_MBX_INT_PENDING) {
156 spin_unlock_irqrestore(&ha->hardware_lock, 164 spin_unlock_irqrestore(&ha->hardware_lock,
157 flags); 165 flags);
158 DEBUG2_3_11(printk(KERN_INFO 166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
159 "%s(%ld): Pending Mailbox timeout. " 167 "Pending mailbox timeout, exiting.\n");
160 "Exiting.\n", __func__, base_vha->host_no));
161 rval = QLA_FUNCTION_TIMEOUT; 168 rval = QLA_FUNCTION_TIMEOUT;
162 goto premature_exit; 169 goto premature_exit;
163 } 170 }
@@ -173,17 +180,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
173 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 180 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
174 181
175 } else { 182 } else {
176 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 183 ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
177 base_vha->host_no, command)); 184 "Cmd=%x Polling Mode.\n", command);
178 185
179 if (IS_QLA82XX(ha)) { 186 if (IS_QLA82XX(ha)) {
180 if (RD_REG_DWORD(&reg->isp82.hint) & 187 if (RD_REG_DWORD(&reg->isp82.hint) &
181 HINT_MBX_INT_PENDING) { 188 HINT_MBX_INT_PENDING) {
182 spin_unlock_irqrestore(&ha->hardware_lock, 189 spin_unlock_irqrestore(&ha->hardware_lock,
183 flags); 190 flags);
184 DEBUG2_3_11(printk(KERN_INFO 191 ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
185 "%s(%ld): Pending Mailbox timeout. " 192 "Pending mailbox timeout, exiting.\n");
186 "Exiting.\n", __func__, base_vha->host_no));
187 rval = QLA_FUNCTION_TIMEOUT; 193 rval = QLA_FUNCTION_TIMEOUT;
188 goto premature_exit; 194 goto premature_exit;
189 } 195 }
@@ -207,17 +213,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
207 command == MBC_LOAD_RISC_RAM_EXTENDED)) 213 command == MBC_LOAD_RISC_RAM_EXTENDED))
208 msleep(10); 214 msleep(10);
209 } /* while */ 215 } /* while */
210 DEBUG17(qla_printk(KERN_WARNING, ha, 216 ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
211 "Waited %d sec\n", 217 "Waited %d sec.\n",
212 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ))); 218 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
213 } 219 }
214 220
215 /* Check whether we timed out */ 221 /* Check whether we timed out */
216 if (ha->flags.mbox_int) { 222 if (ha->flags.mbox_int) {
217 uint16_t *iptr2; 223 uint16_t *iptr2;
218 224
219 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, 225 ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
220 base_vha->host_no, command)); 226 "Cmd=%x completed.\n", command);
221 227
222 /* Got interrupt. Clear the flag. */ 228 /* Got interrupt. Clear the flag. */
223 ha->flags.mbox_int = 0; 229 ha->flags.mbox_int = 0;
@@ -229,6 +235,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
229 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 235 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
230 ha->mcp = NULL; 236 ha->mcp = NULL;
231 rval = QLA_FUNCTION_FAILED; 237 rval = QLA_FUNCTION_FAILED;
238 ql_log(ql_log_warn, base_vha, 0x1015,
239 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
232 goto premature_exit; 240 goto premature_exit;
233 } 241 }
234 242
@@ -249,8 +257,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
249 } 257 }
250 } else { 258 } else {
251 259
252#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3) || \
253 defined(QL_DEBUG_LEVEL_11)
254 uint16_t mb0; 260 uint16_t mb0;
255 uint32_t ictrl; 261 uint32_t ictrl;
256 262
@@ -261,14 +267,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
261 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); 267 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
262 ictrl = RD_REG_WORD(&reg->isp.ictrl); 268 ictrl = RD_REG_WORD(&reg->isp.ictrl);
263 } 269 }
264 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", 270 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
265 __func__, base_vha->host_no, command); 271 "MBX Command timeout for cmd %x.\n", command);
266 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__, 272 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
267 base_vha->host_no, ictrl, jiffies); 273 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
268 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__, 274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
269 base_vha->host_no, mb0); 275 "mb[0] = 0x%x.\n", mb0);
270 qla2x00_dump_regs(base_vha); 276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
271#endif
272 277
273 rval = QLA_FUNCTION_TIMEOUT; 278 rval = QLA_FUNCTION_TIMEOUT;
274 } 279 }
@@ -279,8 +284,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
279 ha->mcp = NULL; 284 ha->mcp = NULL;
280 285
281 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { 286 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
282 DEBUG11(printk("%s(%ld): checking for additional resp " 287 ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
283 "interrupt.\n", __func__, base_vha->host_no)); 288 "Checking for additional resp interrupt.\n");
284 289
285 /* polling mode for non isp_abort commands. */ 290 /* polling mode for non isp_abort commands. */
286 qla2x00_poll(ha->rsp_q_map[0]); 291 qla2x00_poll(ha->rsp_q_map[0]);
@@ -291,38 +296,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
291 if (!io_lock_on || (mcp->flags & IOCTL_CMD) || 296 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
292 ha->flags.eeh_busy) { 297 ha->flags.eeh_busy) {
293 /* not in dpc. schedule it for dpc to take over. */ 298 /* not in dpc. schedule it for dpc to take over. */
294 DEBUG(printk("%s(%ld): timeout schedule " 299 ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
295 "isp_abort_needed.\n", __func__, 300 "Timeout, schedule isp_abort_needed.\n");
296 base_vha->host_no));
297 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
298 "isp_abort_needed.\n", __func__,
299 base_vha->host_no));
300 301
301 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 302 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
302 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 303 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
303 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 304 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
304 305
305 qla_printk(KERN_WARNING, ha, 306 ql_log(ql_log_info, base_vha, 0x101c,
306 "Mailbox command timeout occurred. " 307 "Mailbox cmd timeout occured. "
307 "Scheduling ISP " "abort. eeh_busy: 0x%x\n", 308 "Scheduling ISP abort eeh_busy=0x%x.\n",
308 ha->flags.eeh_busy); 309 ha->flags.eeh_busy);
309 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 310 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
310 qla2xxx_wake_dpc(vha); 311 qla2xxx_wake_dpc(vha);
311 } 312 }
312 } else if (!abort_active) { 313 } else if (!abort_active) {
313 /* call abort directly since we are in the DPC thread */ 314 /* call abort directly since we are in the DPC thread */
314 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 315 ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
315 __func__, base_vha->host_no)); 316 "Timeout, calling abort_isp.\n");
316 DEBUG2_3_11(printk("%s(%ld): timeout calling "
317 "abort_isp\n", __func__, base_vha->host_no));
318 317
319 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 318 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
320 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 319 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
321 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 320 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
322 321
323 qla_printk(KERN_WARNING, ha, 322 ql_log(ql_log_info, base_vha, 0x101e,
324 "Mailbox command timeout occurred. " 323 "Mailbox cmd timeout occured. "
325 "Issuing ISP abort.\n"); 324 "Scheduling ISP abort.\n");
326 325
327 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 326 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
328 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 327 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -332,11 +331,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
332 &vha->dpc_flags); 331 &vha->dpc_flags);
333 } 332 }
334 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 333 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
335 DEBUG(printk("%s(%ld): finished abort_isp\n", 334 ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
336 __func__, vha->host_no)); 335 "Finished abort_isp.\n");
337 DEBUG2_3_11(printk(
338 "%s(%ld): finished abort_isp\n",
339 __func__, vha->host_no));
340 } 336 }
341 } 337 }
342 } 338 }
@@ -346,12 +342,11 @@ premature_exit:
346 complete(&ha->mbx_cmd_comp); 342 complete(&ha->mbx_cmd_comp);
347 343
348 if (rval) { 344 if (rval) {
349 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 345 ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
350 "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no, 346 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
351 mcp->mb[0], mcp->mb[1], mcp->mb[2], command)); 347 mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
352 } else { 348 } else {
353 DEBUG11(printk("%s(%ld): done.\n", __func__, 349 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
354 base_vha->host_no));
355 } 350 }
356 351
357 return rval; 352 return rval;
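
The remaining hunks in qla_mbx.c convert each mailbox-command wrapper to the same three log points: "Entered %s" on entry (ql_dbg_mbx), "Failed=%x ..." when qla2x00_mailbox_command() returns something other than QLA_SUCCESS, and "Done %s" on success. A condensed, shape-only sketch of that pattern follows; it reuses the ids the next hunk assigns to qla2x00_load_ram() purely as an example, the wrapper name is a placeholder, and it is not compilable outside the driver:

    /* Shape-only sketch of a converted wrapper; mirrors the hunks below. */
    static int sketch_mbx_wrapper(scsi_qla_host_t *vha)
    {
            mbx_cmd_t mc;
            mbx_cmd_t *mcp = &mc;
            int rval;

            ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);

            mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;        /* command-specific setup */
            mcp->out_mb = MBX_0;
            mcp->in_mb = MBX_0;
            rval = qla2x00_mailbox_command(vha, mcp);

            if (rval != QLA_SUCCESS)
                    ql_dbg(ql_dbg_mbx, vha, 0x1023,
                        "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
            else
                    ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);

            return rval;
    }
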
@@ -366,7 +361,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
366 mbx_cmd_t mc; 361 mbx_cmd_t mc;
367 mbx_cmd_t *mcp = &mc; 362 mbx_cmd_t *mcp = &mc;
368 363
369 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 364 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
370 365
371 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 366 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
372 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 367 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -397,10 +392,10 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
397 rval = qla2x00_mailbox_command(vha, mcp); 392 rval = qla2x00_mailbox_command(vha, mcp);
398 393
399 if (rval != QLA_SUCCESS) { 394 if (rval != QLA_SUCCESS) {
400 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 395 ql_dbg(ql_dbg_mbx, vha, 0x1023,
401 vha->host_no, rval, mcp->mb[0])); 396 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
402 } else { 397 } else {
403 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 398 ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
404 } 399 }
405 400
406 return rval; 401 return rval;
@@ -430,7 +425,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
430 mbx_cmd_t mc; 425 mbx_cmd_t mc;
431 mbx_cmd_t *mcp = &mc; 426 mbx_cmd_t *mcp = &mc;
432 427
433 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 428 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
434 429
435 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 430 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
436 mcp->out_mb = MBX_0; 431 mcp->out_mb = MBX_0;
@@ -461,15 +456,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
461 rval = qla2x00_mailbox_command(vha, mcp); 456 rval = qla2x00_mailbox_command(vha, mcp);
462 457
463 if (rval != QLA_SUCCESS) { 458 if (rval != QLA_SUCCESS) {
464 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 459 ql_dbg(ql_dbg_mbx, vha, 0x1026,
465 vha->host_no, rval, mcp->mb[0])); 460 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
466 } else { 461 } else {
467 if (IS_FWI2_CAPABLE(ha)) { 462 if (IS_FWI2_CAPABLE(ha)) {
468 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 463 ql_dbg(ql_dbg_mbx, vha, 0x1027,
469 __func__, vha->host_no, mcp->mb[1])); 464 "Done exchanges=%x.\n", mcp->mb[1]);
470 } else { 465 } else {
471 DEBUG11(printk("%s(%ld): done.\n", __func__, 466 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
472 vha->host_no));
473 } 467 }
474 } 468 }
475 469
@@ -501,7 +495,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
501 mbx_cmd_t mc; 495 mbx_cmd_t mc;
502 mbx_cmd_t *mcp = &mc; 496 mbx_cmd_t *mcp = &mc;
503 497
504 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 498 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
505 499
506 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 500 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
507 mcp->out_mb = MBX_0; 501 mcp->out_mb = MBX_0;
@@ -535,11 +529,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
535failed: 529failed:
536 if (rval != QLA_SUCCESS) { 530 if (rval != QLA_SUCCESS) {
537 /*EMPTY*/ 531 /*EMPTY*/
538 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 532 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
539 vha->host_no, rval));
540 } else { 533 } else {
541 /*EMPTY*/ 534 /*EMPTY*/
542 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 535 ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
543 } 536 }
544 return rval; 537 return rval;
545} 538}
@@ -565,7 +558,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
565 mbx_cmd_t mc; 558 mbx_cmd_t mc;
566 mbx_cmd_t *mcp = &mc; 559 mbx_cmd_t *mcp = &mc;
567 560
568 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 561 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
569 562
570 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 563 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
571 mcp->out_mb = MBX_0; 564 mcp->out_mb = MBX_0;
@@ -576,15 +569,14 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
576 569
577 if (rval != QLA_SUCCESS) { 570 if (rval != QLA_SUCCESS) {
578 /*EMPTY*/ 571 /*EMPTY*/
579 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 572 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
580 vha->host_no, rval));
581 } else { 573 } else {
582 fwopts[0] = mcp->mb[0]; 574 fwopts[0] = mcp->mb[0];
583 fwopts[1] = mcp->mb[1]; 575 fwopts[1] = mcp->mb[1];
584 fwopts[2] = mcp->mb[2]; 576 fwopts[2] = mcp->mb[2];
585 fwopts[3] = mcp->mb[3]; 577 fwopts[3] = mcp->mb[3];
586 578
587 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 579 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
588 } 580 }
589 581
590 return rval; 582 return rval;
@@ -612,7 +604,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
612 mbx_cmd_t mc; 604 mbx_cmd_t mc;
613 mbx_cmd_t *mcp = &mc; 605 mbx_cmd_t *mcp = &mc;
614 606
615 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 607 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
616 608
617 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 609 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
618 mcp->mb[1] = fwopts[1]; 610 mcp->mb[1] = fwopts[1];
@@ -636,11 +628,11 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
636 628
637 if (rval != QLA_SUCCESS) { 629 if (rval != QLA_SUCCESS) {
638 /*EMPTY*/ 630 /*EMPTY*/
639 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__, 631 ql_dbg(ql_dbg_mbx, vha, 0x1030,
640 vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 632 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
641 } else { 633 } else {
642 /*EMPTY*/ 634 /*EMPTY*/
643 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 635 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
644 } 636 }
645 637
646 return rval; 638 return rval;
@@ -668,7 +660,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
668 mbx_cmd_t mc; 660 mbx_cmd_t mc;
669 mbx_cmd_t *mcp = &mc; 661 mbx_cmd_t *mcp = &mc;
670 662
671 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no)); 663 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
672 664
673 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 665 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
674 mcp->mb[1] = 0xAAAA; 666 mcp->mb[1] = 0xAAAA;
@@ -695,12 +687,10 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
695 687
696 if (rval != QLA_SUCCESS) { 688 if (rval != QLA_SUCCESS) {
697 /*EMPTY*/ 689 /*EMPTY*/
698 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", 690 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
699 vha->host_no, rval));
700 } else { 691 } else {
701 /*EMPTY*/ 692 /*EMPTY*/
702 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", 693 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
703 vha->host_no));
704 } 694 }
705 695
706 return rval; 696 return rval;
@@ -728,7 +718,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
728 mbx_cmd_t mc; 718 mbx_cmd_t mc;
729 mbx_cmd_t *mcp = &mc; 719 mbx_cmd_t *mcp = &mc;
730 720
731 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 721 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
732 722
733 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 723 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
734 mcp->out_mb = MBX_0; 724 mcp->out_mb = MBX_0;
@@ -749,11 +739,11 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
749 rval = qla2x00_mailbox_command(vha, mcp); 739 rval = qla2x00_mailbox_command(vha, mcp);
750 740
751 if (rval != QLA_SUCCESS) { 741 if (rval != QLA_SUCCESS) {
752 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 742 ql_dbg(ql_dbg_mbx, vha, 0x1036,
753 vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ? 743 "Failed=%x chk sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
754 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])); 744 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
755 } else { 745 } else {
756 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 746 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
757 } 747 }
758 748
759 return rval; 749 return rval;
@@ -785,6 +775,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
785 mbx_cmd_t mc; 775 mbx_cmd_t mc;
786 mbx_cmd_t *mcp = &mc; 776 mbx_cmd_t *mcp = &mc;
787 777
778 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
779
788 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 780 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
789 mcp->mb[1] = 0; 781 mcp->mb[1] = 0;
790 mcp->mb[2] = MSW(phys_addr); 782 mcp->mb[2] = MSW(phys_addr);
@@ -799,14 +791,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
799 791
800 if (rval != QLA_SUCCESS) { 792 if (rval != QLA_SUCCESS) {
801 /*EMPTY*/ 793 /*EMPTY*/
802 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 794 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
803 vha->host_no, rval));
804 } else { 795 } else {
805 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 796 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
806 797
807 /* Mask reserved bits. */ 798 /* Mask reserved bits. */
808 sts_entry->entry_status &= 799 sts_entry->entry_status &=
809 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 800 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
801 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
810 } 802 }
811 803
812 return rval; 804 return rval;
@@ -847,7 +839,7 @@ qla2x00_abort_command(srb_t *sp)
847 struct qla_hw_data *ha = vha->hw; 839 struct qla_hw_data *ha = vha->hw;
848 struct req_que *req = vha->req; 840 struct req_que *req = vha->req;
849 841
850 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 842 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
851 843
852 spin_lock_irqsave(&ha->hardware_lock, flags); 844 spin_lock_irqsave(&ha->hardware_lock, flags);
853 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 845 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -876,11 +868,9 @@ qla2x00_abort_command(srb_t *sp)
876 rval = qla2x00_mailbox_command(vha, mcp); 868 rval = qla2x00_mailbox_command(vha, mcp);
877 869
878 if (rval != QLA_SUCCESS) { 870 if (rval != QLA_SUCCESS) {
879 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 871 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
880 vha->host_no, rval));
881 } else { 872 } else {
882 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 873 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
883 vha->host_no));
884 } 874 }
885 875
886 return rval; 876 return rval;
@@ -896,10 +886,11 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
896 struct req_que *req; 886 struct req_que *req;
897 struct rsp_que *rsp; 887 struct rsp_que *rsp;
898 888
899 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
900
901 l = l; 889 l = l;
902 vha = fcport->vha; 890 vha = fcport->vha;
891
892 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
893
903 req = vha->hw->req_q_map[0]; 894 req = vha->hw->req_q_map[0];
904 rsp = req->rsp; 895 rsp = req->rsp;
905 mcp->mb[0] = MBC_ABORT_TARGET; 896 mcp->mb[0] = MBC_ABORT_TARGET;
@@ -919,18 +910,17 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
919 mcp->flags = 0; 910 mcp->flags = 0;
920 rval = qla2x00_mailbox_command(vha, mcp); 911 rval = qla2x00_mailbox_command(vha, mcp);
921 if (rval != QLA_SUCCESS) { 912 if (rval != QLA_SUCCESS) {
922 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 913 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
923 vha->host_no, rval));
924 } 914 }
925 915
926 /* Issue marker IOCB. */ 916 /* Issue marker IOCB. */
927 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0, 917 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
928 MK_SYNC_ID); 918 MK_SYNC_ID);
929 if (rval2 != QLA_SUCCESS) { 919 if (rval2 != QLA_SUCCESS) {
930 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 920 ql_dbg(ql_dbg_mbx, vha, 0x1040,
931 "(%x).\n", __func__, vha->host_no, rval2)); 921 "Failed to issue marker IOCB (%x).\n", rval2);
932 } else { 922 } else {
933 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 923 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
934 } 924 }
935 925
936 return rval; 926 return rval;
@@ -946,9 +936,10 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
946 struct req_que *req; 936 struct req_que *req;
947 struct rsp_que *rsp; 937 struct rsp_que *rsp;
948 938
949 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
950
951 vha = fcport->vha; 939 vha = fcport->vha;
940
941 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
942
952 req = vha->hw->req_q_map[0]; 943 req = vha->hw->req_q_map[0];
953 rsp = req->rsp; 944 rsp = req->rsp;
954 mcp->mb[0] = MBC_LUN_RESET; 945 mcp->mb[0] = MBC_LUN_RESET;
@@ -966,18 +957,17 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
966 mcp->flags = 0; 957 mcp->flags = 0;
967 rval = qla2x00_mailbox_command(vha, mcp); 958 rval = qla2x00_mailbox_command(vha, mcp);
968 if (rval != QLA_SUCCESS) { 959 if (rval != QLA_SUCCESS) {
969 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 960 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
970 vha->host_no, rval));
971 } 961 }
972 962
973 /* Issue marker IOCB. */ 963 /* Issue marker IOCB. */
974 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 964 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
975 MK_SYNC_ID_LUN); 965 MK_SYNC_ID_LUN);
976 if (rval2 != QLA_SUCCESS) { 966 if (rval2 != QLA_SUCCESS) {
977 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 967 ql_dbg(ql_dbg_mbx, vha, 0x1044,
978 "(%x).\n", __func__, vha->host_no, rval2)); 968 "Failed to issue marker IOCB (%x).\n", rval2);
979 } else { 969 } else {
980 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 970 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
981 } 971 }
982 972
983 return rval; 973 return rval;
@@ -1011,8 +1001,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1011 mbx_cmd_t mc; 1001 mbx_cmd_t mc;
1012 mbx_cmd_t *mcp = &mc; 1002 mbx_cmd_t *mcp = &mc;
1013 1003
1014 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", 1004 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
1015 vha->host_no));
1016 1005
1017 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1006 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1018 mcp->mb[9] = vha->vp_idx; 1007 mcp->mb[9] = vha->vp_idx;
@@ -1038,11 +1027,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1038 1027
1039 if (rval != QLA_SUCCESS) { 1028 if (rval != QLA_SUCCESS) {
1040 /*EMPTY*/ 1029 /*EMPTY*/
1041 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 1030 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1042 vha->host_no, rval));
1043 } else { 1031 } else {
1044 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 1032 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
1045 vha->host_no));
1046 1033
1047 if (IS_QLA8XXX_TYPE(vha->hw)) { 1034 if (IS_QLA8XXX_TYPE(vha->hw)) {
1048 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1035 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1083,8 +1070,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1083 mbx_cmd_t mc; 1070 mbx_cmd_t mc;
1084 mbx_cmd_t *mcp = &mc; 1071 mbx_cmd_t *mcp = &mc;
1085 1072
1086 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", 1073 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
1087 vha->host_no));
1088 1074
1089 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1075 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1090 mcp->out_mb = MBX_0; 1076 mcp->out_mb = MBX_0;
@@ -1095,8 +1081,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1095 1081
1096 if (rval != QLA_SUCCESS) { 1082 if (rval != QLA_SUCCESS) {
1097 /*EMPTY*/ 1083 /*EMPTY*/
1098 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", 1084 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1099 vha->host_no, mcp->mb[0])); 1085 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1100 } else { 1086 } else {
1101 /* Convert returned data and check our values. */ 1087 /* Convert returned data and check our values. */
1102 *r_a_tov = mcp->mb[3] / 2; 1088 *r_a_tov = mcp->mb[3] / 2;
@@ -1107,8 +1093,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1107 *tov = ratov; 1093 *tov = ratov;
1108 } 1094 }
1109 1095
1110 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d " 1096 ql_dbg(ql_dbg_mbx, vha, 0x104b,
1111 "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov)); 1097 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1112 } 1098 }
1113 1099
1114 return rval; 1100 return rval;
@@ -1139,8 +1125,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1139 mbx_cmd_t *mcp = &mc; 1125 mbx_cmd_t *mcp = &mc;
1140 struct qla_hw_data *ha = vha->hw; 1126 struct qla_hw_data *ha = vha->hw;
1141 1127
1142 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1128 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
1143 vha->host_no));
1144 1129
1145 if (IS_QLA82XX(ha) && ql2xdbwr) 1130 if (IS_QLA82XX(ha) && ql2xdbwr)
1146 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, 1131 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1174,13 +1159,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1174 1159
1175 if (rval != QLA_SUCCESS) { 1160 if (rval != QLA_SUCCESS) {
1176 /*EMPTY*/ 1161 /*EMPTY*/
1177 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " 1162 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1178 "mb0=%x.\n", 1163 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1179 vha->host_no, rval, mcp->mb[0]));
1180 } else { 1164 } else {
1181 /*EMPTY*/ 1165 /*EMPTY*/
1182 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", 1166 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
1183 vha->host_no));
1184 } 1167 }
1185 1168
1186 return rval; 1169 return rval;
@@ -1213,13 +1196,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1213 dma_addr_t pd_dma; 1196 dma_addr_t pd_dma;
1214 struct qla_hw_data *ha = vha->hw; 1197 struct qla_hw_data *ha = vha->hw;
1215 1198
1216 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1199 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
1217 1200
1218 pd24 = NULL; 1201 pd24 = NULL;
1219 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1202 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1220 if (pd == NULL) { 1203 if (pd == NULL) {
1221 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database " 1204 ql_log(ql_log_warn, vha, 0x1050,
1222 "structure.\n", __func__, vha->host_no)); 1205 "Failed to allocate port database structure.\n");
1223 return QLA_MEMORY_ALLOC_FAILED; 1206 return QLA_MEMORY_ALLOC_FAILED;
1224 } 1207 }
1225 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 1208 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1261,12 +1244,10 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1261 /* Check for logged in state. */ 1244 /* Check for logged in state. */
1262 if (pd24->current_login_state != PDS_PRLI_COMPLETE && 1245 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1263 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1246 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1264 DEBUG2(qla_printk(KERN_WARNING, ha, 1247 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1265 "scsi(%ld): Unable to verify login-state (%x/%x) " 1248 "Unable to verify login-state (%x/%x) for "
1266 " - portid=%02x%02x%02x.\n", vha->host_no, 1249 "loop_id %x.\n", pd24->current_login_state,
1267 pd24->current_login_state, pd24->last_login_state, 1250 pd24->last_login_state, fcport->loop_id);
1268 fcport->d_id.b.domain, fcport->d_id.b.area,
1269 fcport->d_id.b.al_pa));
1270 rval = QLA_FUNCTION_FAILED; 1251 rval = QLA_FUNCTION_FAILED;
1271 goto gpd_error_out; 1252 goto gpd_error_out;
1272 } 1253 }
@@ -1290,12 +1271,11 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1290 /* Check for logged in state. */ 1271 /* Check for logged in state. */
1291 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1272 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1292 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1273 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1293 DEBUG2(qla_printk(KERN_WARNING, ha, 1274 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1294 "scsi(%ld): Unable to verify login-state (%x/%x) " 1275 "Unable to verify login-state (%x/%x) - "
1295 " - portid=%02x%02x%02x.\n", vha->host_no, 1276 "portid=%02x%02x%02x.\n", pd->master_state,
1296 pd->master_state, pd->slave_state, 1277 pd->slave_state, fcport->d_id.b.domain,
1297 fcport->d_id.b.domain, fcport->d_id.b.area, 1278 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1298 fcport->d_id.b.al_pa));
1299 rval = QLA_FUNCTION_FAILED; 1279 rval = QLA_FUNCTION_FAILED;
1300 goto gpd_error_out; 1280 goto gpd_error_out;
1301 } 1281 }
@@ -1325,10 +1305,11 @@ gpd_error_out:
1325 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 1305 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1326 1306
1327 if (rval != QLA_SUCCESS) { 1307 if (rval != QLA_SUCCESS) {
1328 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 1308 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1329 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1309 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1310 mcp->mb[0], mcp->mb[1]);
1330 } else { 1311 } else {
1331 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1312 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
1332 } 1313 }
1333 1314
1334 return rval; 1315 return rval;
@@ -1357,8 +1338,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1357 mbx_cmd_t mc; 1338 mbx_cmd_t mc;
1358 mbx_cmd_t *mcp = &mc; 1339 mbx_cmd_t *mcp = &mc;
1359 1340
1360 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", 1341 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
1361 vha->host_no));
1362 1342
1363 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1343 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1364 mcp->out_mb = MBX_0; 1344 mcp->out_mb = MBX_0;
@@ -1381,12 +1361,10 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1381 1361
1382 if (rval != QLA_SUCCESS) { 1362 if (rval != QLA_SUCCESS) {
1383 /*EMPTY*/ 1363 /*EMPTY*/
1384 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " 1364 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1385 "failed=%x.\n", vha->host_no, rval));
1386 } else { 1365 } else {
1387 /*EMPTY*/ 1366 /*EMPTY*/
1388 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", 1367 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
1389 vha->host_no));
1390 } 1368 }
1391 1369
1392 return rval; 1370 return rval;
@@ -1418,8 +1396,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1418 mbx_cmd_t mc; 1396 mbx_cmd_t mc;
1419 mbx_cmd_t *mcp = &mc; 1397 mbx_cmd_t *mcp = &mc;
1420 1398
1421 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", 1399 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
1422 vha->host_no));
1423 1400
1424 mcp->mb[0] = MBC_GET_PORT_NAME; 1401 mcp->mb[0] = MBC_GET_PORT_NAME;
1425 mcp->mb[9] = vha->vp_idx; 1402 mcp->mb[9] = vha->vp_idx;
@@ -1439,8 +1416,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1439 1416
1440 if (rval != QLA_SUCCESS) { 1417 if (rval != QLA_SUCCESS) {
1441 /*EMPTY*/ 1418 /*EMPTY*/
1442 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", 1419 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1443 vha->host_no, rval));
1444 } else { 1420 } else {
1445 if (name != NULL) { 1421 if (name != NULL) {
1446 /* This function returns name in big endian. */ 1422 /* This function returns name in big endian. */
@@ -1454,8 +1430,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1454 name[7] = LSB(mcp->mb[7]); 1430 name[7] = LSB(mcp->mb[7]);
1455 } 1431 }
1456 1432
1457 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1433 ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
1458 vha->host_no));
1459 } 1434 }
1460 1435
1461 return rval; 1436 return rval;
@@ -1483,7 +1458,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1483 mbx_cmd_t mc; 1458 mbx_cmd_t mc;
1484 mbx_cmd_t *mcp = &mc; 1459 mbx_cmd_t *mcp = &mc;
1485 1460
1486 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1461 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
1487 1462
1488 if (IS_QLA8XXX_TYPE(vha->hw)) { 1463 if (IS_QLA8XXX_TYPE(vha->hw)) {
1489 /* Logout across all FCFs. */ 1464 /* Logout across all FCFs. */
@@ -1517,11 +1492,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1517 1492
1518 if (rval != QLA_SUCCESS) { 1493 if (rval != QLA_SUCCESS) {
1519 /*EMPTY*/ 1494 /*EMPTY*/
1520 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", 1495 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1521 __func__, vha->host_no, rval));
1522 } else { 1496 } else {
1523 /*EMPTY*/ 1497 /*EMPTY*/
1524 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1498 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
1525 } 1499 }
1526 1500
1527 return rval; 1501 return rval;
@@ -1553,12 +1527,11 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1553 mbx_cmd_t mc; 1527 mbx_cmd_t mc;
1554 mbx_cmd_t *mcp = &mc; 1528 mbx_cmd_t *mcp = &mc;
1555 1529
1556 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", 1530 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
1557 vha->host_no));
1558 1531
1559 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " 1532 ql_dbg(ql_dbg_mbx, vha, 0x105e,
1560 "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout, 1533 "Retry cnt=%d ratov=%d total tov=%d.\n",
1561 mcp->tov)); 1534 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1562 1535
1563 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 1536 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1564 mcp->mb[1] = cmd_size; 1537 mcp->mb[1] = cmd_size;
@@ -1575,13 +1548,12 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1575 1548
1576 if (rval != QLA_SUCCESS) { 1549 if (rval != QLA_SUCCESS) {
1577 /*EMPTY*/ 1550 /*EMPTY*/
1578 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1551 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1579 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1552 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1580 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1553 rval, mcp->mb[0], mcp->mb[1]);
1581 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1582 } else { 1554 } else {
1583 /*EMPTY*/ 1555 /*EMPTY*/
1584 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no)); 1556 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
1585 } 1557 }
1586 1558
1587 return rval; 1559 return rval;
@@ -1600,7 +1572,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1600 struct req_que *req; 1572 struct req_que *req;
1601 struct rsp_que *rsp; 1573 struct rsp_que *rsp;
1602 1574
1603 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1575 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
1604 1576
1605 if (ha->flags.cpu_affinity_enabled) 1577 if (ha->flags.cpu_affinity_enabled)
1606 req = ha->req_q_map[0]; 1578 req = ha->req_q_map[0];
@@ -1610,8 +1582,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1610 1582
1611 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1583 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1612 if (lg == NULL) { 1584 if (lg == NULL) {
1613 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1585 ql_log(ql_log_warn, vha, 0x1062,
1614 __func__, vha->host_no)); 1586 "Failed to allocate login IOCB.\n");
1615 return QLA_MEMORY_ALLOC_FAILED; 1587 return QLA_MEMORY_ALLOC_FAILED;
1616 } 1588 }
1617 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1589 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1631,21 +1603,21 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1631 lg->vp_index = vha->vp_idx; 1603 lg->vp_index = vha->vp_idx;
1632 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1604 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1633 if (rval != QLA_SUCCESS) { 1605 if (rval != QLA_SUCCESS) {
1634 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1606 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1635 "(%x).\n", __func__, vha->host_no, rval)); 1607 "Failed to issue login IOCB (%x).\n", rval);
1636 } else if (lg->entry_status != 0) { 1608 } else if (lg->entry_status != 0) {
1637 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1609 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1638 "-- error status (%x).\n", __func__, vha->host_no, 1610 "Failed to complete IOCB -- error status (%x).\n",
1639 lg->entry_status)); 1611 lg->entry_status);
1640 rval = QLA_FUNCTION_FAILED; 1612 rval = QLA_FUNCTION_FAILED;
1641 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1613 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1642 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1614 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1643 iop[1] = le32_to_cpu(lg->io_parameter[1]); 1615 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1644 1616
1645 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1617 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1646 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1618 "Failed to complete IOCB -- completion status (%x) "
1647 vha->host_no, le16_to_cpu(lg->comp_status), iop[0], 1619 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1648 iop[1])); 1620 iop[0], iop[1]);
1649 1621
1650 switch (iop[0]) { 1622 switch (iop[0]) {
1651 case LSC_SCODE_PORTID_USED: 1623 case LSC_SCODE_PORTID_USED:
@@ -1673,7 +1645,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1673 break; 1645 break;
1674 } 1646 }
1675 } else { 1647 } else {
1676 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1648 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
1677 1649
1678 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1650 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1679 1651
@@ -1728,7 +1700,7 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1728 mbx_cmd_t *mcp = &mc; 1700 mbx_cmd_t *mcp = &mc;
1729 struct qla_hw_data *ha = vha->hw; 1701 struct qla_hw_data *ha = vha->hw;
1730 1702
1731 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no)); 1703 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
1732 1704
1733 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1705 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1734 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1706 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1771,13 +1743,12 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1771 rval = QLA_SUCCESS; 1743 rval = QLA_SUCCESS;
1772 1744
1773 /*EMPTY*/ 1745 /*EMPTY*/
1774 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " 1746 ql_dbg(ql_dbg_mbx, vha, 0x1068,
1775 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval, 1747 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
1776 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 1748 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1777 } else { 1749 } else {
1778 /*EMPTY*/ 1750 /*EMPTY*/
1779 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", 1751 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
1780 vha->host_no));
1781 } 1752 }
1782 1753
1783 return rval; 1754 return rval;
@@ -1808,13 +1779,13 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1808 mbx_cmd_t *mcp = &mc; 1779 mbx_cmd_t *mcp = &mc;
1809 struct qla_hw_data *ha = vha->hw; 1780 struct qla_hw_data *ha = vha->hw;
1810 1781
1782 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
1783
1811 if (IS_FWI2_CAPABLE(ha)) 1784 if (IS_FWI2_CAPABLE(ha))
1812 return qla24xx_login_fabric(vha, fcport->loop_id, 1785 return qla24xx_login_fabric(vha, fcport->loop_id,
1813 fcport->d_id.b.domain, fcport->d_id.b.area, 1786 fcport->d_id.b.domain, fcport->d_id.b.area,
1814 fcport->d_id.b.al_pa, mb_ret, opt); 1787 fcport->d_id.b.al_pa, mb_ret, opt);
1815 1788
1816 DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1817
1818 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 1789 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1819 if (HAS_EXTENDED_IDS(ha)) 1790 if (HAS_EXTENDED_IDS(ha))
1820 mcp->mb[1] = fcport->loop_id; 1791 mcp->mb[1] = fcport->loop_id;
@@ -1845,15 +1816,12 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1845 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 1816 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
1846 rval = QLA_SUCCESS; 1817 rval = QLA_SUCCESS;
1847 1818
1848 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1819 ql_dbg(ql_dbg_mbx, vha, 0x106b,
1849 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval, 1820 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
1850 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1821 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1851 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1852 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1853 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1854 } else { 1822 } else {
1855 /*EMPTY*/ 1823 /*EMPTY*/
1856 DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1824 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
1857 } 1825 }
1858 1826
1859 return (rval); 1827 return (rval);
@@ -1870,12 +1838,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1870 struct req_que *req; 1838 struct req_que *req;
1871 struct rsp_que *rsp; 1839 struct rsp_que *rsp;
1872 1840
1873 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1841 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
1874 1842
1875 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1843 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1876 if (lg == NULL) { 1844 if (lg == NULL) {
1877 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n", 1845 ql_log(ql_log_warn, vha, 0x106e,
1878 __func__, vha->host_no)); 1846 "Failed to allocate logout IOCB.\n");
1879 return QLA_MEMORY_ALLOC_FAILED; 1847 return QLA_MEMORY_ALLOC_FAILED;
1880 } 1848 }
1881 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1849 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1899,22 +1867,22 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1899 1867
1900 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1868 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1901 if (rval != QLA_SUCCESS) { 1869 if (rval != QLA_SUCCESS) {
1902 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1870 ql_dbg(ql_dbg_mbx, vha, 0x106f,
1903 "(%x).\n", __func__, vha->host_no, rval)); 1871 "Failed to issue logout IOCB (%x).\n", rval);
1904 } else if (lg->entry_status != 0) { 1872 } else if (lg->entry_status != 0) {
1905 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1873 ql_dbg(ql_dbg_mbx, vha, 0x1070,
1906 "-- error status (%x).\n", __func__, vha->host_no, 1874 "Failed to complete IOCB -- error status (%x).\n",
1907 lg->entry_status)); 1875 lg->entry_status);
1908 rval = QLA_FUNCTION_FAILED; 1876 rval = QLA_FUNCTION_FAILED;
1909 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1877 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1910 DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB " 1878 ql_dbg(ql_dbg_mbx, vha, 0x1071,
1911 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1879 "Failed to complete IOCB -- completion status (%x) "
1912 vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status), 1880 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1913 le32_to_cpu(lg->io_parameter[0]), 1881 le32_to_cpu(lg->io_parameter[0]),
1914 le32_to_cpu(lg->io_parameter[1]))); 1882 le32_to_cpu(lg->io_parameter[1]));
1915 } else { 1883 } else {
1916 /*EMPTY*/ 1884 /*EMPTY*/
1917 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1885 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
1918 } 1886 }
1919 1887
1920 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1888 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1946,8 +1914,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1946 mbx_cmd_t mc; 1914 mbx_cmd_t mc;
1947 mbx_cmd_t *mcp = &mc; 1915 mbx_cmd_t *mcp = &mc;
1948 1916
1949 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", 1917 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
1950 vha->host_no));
1951 1918
1952 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 1919 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1953 mcp->out_mb = MBX_1|MBX_0; 1920 mcp->out_mb = MBX_1|MBX_0;
@@ -1966,12 +1933,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1966 1933
1967 if (rval != QLA_SUCCESS) { 1934 if (rval != QLA_SUCCESS) {
1968 /*EMPTY*/ 1935 /*EMPTY*/
1969 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " 1936 ql_dbg(ql_dbg_mbx, vha, 0x1074,
1970 "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1])); 1937 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
1971 } else { 1938 } else {
1972 /*EMPTY*/ 1939 /*EMPTY*/
1973 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", 1940 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
1974 vha->host_no));
1975 } 1941 }
1976 1942
1977 return rval; 1943 return rval;
@@ -1999,8 +1965,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1999 mbx_cmd_t mc; 1965 mbx_cmd_t mc;
2000 mbx_cmd_t *mcp = &mc; 1966 mbx_cmd_t *mcp = &mc;
2001 1967
2002 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1968 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
2003 vha->host_no));
2004 1969
2005 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1970 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2006 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; 1971 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2014,12 +1979,10 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2014 1979
2015 if (rval != QLA_SUCCESS) { 1980 if (rval != QLA_SUCCESS) {
2016 /*EMPTY*/ 1981 /*EMPTY*/
2017 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", 1982 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2018 vha->host_no, rval));
2019 } else { 1983 } else {
2020 /*EMPTY*/ 1984 /*EMPTY*/
2021 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", 1985 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
2022 vha->host_no));
2023 } 1986 }
2024 1987
2025 return rval; 1988 return rval;
@@ -2045,8 +2008,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2045 mbx_cmd_t mc; 2008 mbx_cmd_t mc;
2046 mbx_cmd_t *mcp = &mc; 2009 mbx_cmd_t *mcp = &mc;
2047 2010
2048 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", 2011 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
2049 vha->host_no));
2050 2012
2051 if (id_list == NULL) 2013 if (id_list == NULL)
2052 return QLA_FUNCTION_FAILED; 2014 return QLA_FUNCTION_FAILED;
@@ -2075,12 +2037,10 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2075 2037
2076 if (rval != QLA_SUCCESS) { 2038 if (rval != QLA_SUCCESS) {
2077 /*EMPTY*/ 2039 /*EMPTY*/
2078 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", 2040 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2079 vha->host_no, rval));
2080 } else { 2041 } else {
2081 *entries = mcp->mb[1]; 2042 *entries = mcp->mb[1];
2082 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", 2043 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
2083 vha->host_no));
2084 } 2044 }
2085 2045
2086 return rval; 2046 return rval;
@@ -2108,7 +2068,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2108 mbx_cmd_t mc; 2068 mbx_cmd_t mc;
2109 mbx_cmd_t *mcp = &mc; 2069 mbx_cmd_t *mcp = &mc;
2110 2070
2111 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2071 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
2112 2072
2113 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2073 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2114 mcp->out_mb = MBX_0; 2074 mcp->out_mb = MBX_0;
@@ -2121,14 +2081,14 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2121 2081
2122 if (rval != QLA_SUCCESS) { 2082 if (rval != QLA_SUCCESS) {
2123 /*EMPTY*/ 2083 /*EMPTY*/
2124 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__, 2084 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2125 vha->host_no, mcp->mb[0])); 2085 "Failed mb[0]=%x.\n", mcp->mb[0]);
2126 } else { 2086 } else {
2127 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " 2087 ql_dbg(ql_dbg_mbx, vha, 0x107e,
2128 "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__, 2088 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2129 vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3], 2089 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2130 mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], 2090 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2131 mcp->mb[12])); 2091 mcp->mb[11], mcp->mb[12]);
2132 2092
2133 if (cur_xchg_cnt) 2093 if (cur_xchg_cnt)
2134 *cur_xchg_cnt = mcp->mb[3]; 2094 *cur_xchg_cnt = mcp->mb[3];
@@ -2147,7 +2107,6 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2147 return (rval); 2107 return (rval);
2148} 2108}
2149 2109
2150#if defined(QL_DEBUG_LEVEL_3)
2151/* 2110/*
2152 * qla2x00_get_fcal_position_map 2111 * qla2x00_get_fcal_position_map
2153 * Get FCAL (LILP) position map using mailbox command 2112 * Get FCAL (LILP) position map using mailbox command
@@ -2172,10 +2131,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2172 dma_addr_t pmap_dma; 2131 dma_addr_t pmap_dma;
2173 struct qla_hw_data *ha = vha->hw; 2132 struct qla_hw_data *ha = vha->hw;
2174 2133
2134 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
2135
2175 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2136 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2176 if (pmap == NULL) { 2137 if (pmap == NULL) {
2177 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2138 ql_log(ql_log_warn, vha, 0x1080,
2178 __func__, vha->host_no)); 2139 "Memory alloc failed.\n");
2179 return QLA_MEMORY_ALLOC_FAILED; 2140 return QLA_MEMORY_ALLOC_FAILED;
2180 } 2141 }
2181 memset(pmap, 0, FCAL_MAP_SIZE); 2142 memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2193,10 +2154,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2193 rval = qla2x00_mailbox_command(vha, mcp); 2154 rval = qla2x00_mailbox_command(vha, mcp);
2194 2155
2195 if (rval == QLA_SUCCESS) { 2156 if (rval == QLA_SUCCESS) {
2196 DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map " 2157 ql_dbg(ql_dbg_mbx, vha, 0x1081,
2197 "size (%x)\n", __func__, vha->host_no, mcp->mb[0], 2158 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2198 mcp->mb[1], (unsigned)pmap[0])); 2159 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2199 DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1)); 2160 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2161 pmap, pmap[0] + 1);
2200 2162
2201 if (pos_map) 2163 if (pos_map)
2202 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 2164 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
@@ -2204,15 +2166,13 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2204 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 2166 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2205 2167
2206 if (rval != QLA_SUCCESS) { 2168 if (rval != QLA_SUCCESS) {
2207 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2169 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2208 vha->host_no, rval));
2209 } else { 2170 } else {
2210 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2171 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
2211 } 2172 }
2212 2173
2213 return rval; 2174 return rval;
2214} 2175}
2215#endif
2216 2176
2217/* 2177/*
2218 * qla2x00_get_link_status 2178 * qla2x00_get_link_status
@@ -2237,7 +2197,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2237 uint32_t *siter, *diter, dwords; 2197 uint32_t *siter, *diter, dwords;
2238 struct qla_hw_data *ha = vha->hw; 2198 struct qla_hw_data *ha = vha->hw;
2239 2199
2240 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2200 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
2241 2201
2242 mcp->mb[0] = MBC_GET_LINK_STATUS; 2202 mcp->mb[0] = MBC_GET_LINK_STATUS;
2243 mcp->mb[2] = MSW(stats_dma); 2203 mcp->mb[2] = MSW(stats_dma);
@@ -2266,11 +2226,12 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2266 2226
2267 if (rval == QLA_SUCCESS) { 2227 if (rval == QLA_SUCCESS) {
2268 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2228 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2269 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2229 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2270 __func__, vha->host_no, mcp->mb[0])); 2230 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2271 rval = QLA_FUNCTION_FAILED; 2231 rval = QLA_FUNCTION_FAILED;
2272 } else { 2232 } else {
2273 /* Copy over data -- firmware data is LE. */ 2233 /* Copy over data -- firmware data is LE. */
2234 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
2274 dwords = offsetof(struct link_statistics, unused1) / 4; 2235 dwords = offsetof(struct link_statistics, unused1) / 4;
2275 siter = diter = &stats->link_fail_cnt; 2236 siter = diter = &stats->link_fail_cnt;
2276 while (dwords--) 2237 while (dwords--)
@@ -2278,8 +2239,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2278 } 2239 }
2279 } else { 2240 } else {
2280 /* Failed. */ 2241 /* Failed. */
2281 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2242 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2282 vha->host_no, rval));
2283 } 2243 }
2284 2244
2285 return rval; 2245 return rval;
@@ -2294,7 +2254,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2294 mbx_cmd_t *mcp = &mc; 2254 mbx_cmd_t *mcp = &mc;
2295 uint32_t *siter, *diter, dwords; 2255 uint32_t *siter, *diter, dwords;
2296 2256
2297 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2257 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
2298 2258
2299 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2259 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2300 mcp->mb[2] = MSW(stats_dma); 2260 mcp->mb[2] = MSW(stats_dma);
@@ -2312,10 +2272,11 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2312 2272
2313 if (rval == QLA_SUCCESS) { 2273 if (rval == QLA_SUCCESS) {
2314 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2274 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2315 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2275 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2316 __func__, vha->host_no, mcp->mb[0])); 2276 "Failed mb[0]=%x.\n", mcp->mb[0]);
2317 rval = QLA_FUNCTION_FAILED; 2277 rval = QLA_FUNCTION_FAILED;
2318 } else { 2278 } else {
2279 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
2319 /* Copy over data -- firmware data is LE. */ 2280 /* Copy over data -- firmware data is LE. */
2320 dwords = sizeof(struct link_statistics) / 4; 2281 dwords = sizeof(struct link_statistics) / 4;
2321 siter = diter = &stats->link_fail_cnt; 2282 siter = diter = &stats->link_fail_cnt;
@@ -2324,8 +2285,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2324 } 2285 }
2325 } else { 2286 } else {
2326 /* Failed. */ 2287 /* Failed. */
2327 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2288 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2328 vha->host_no, rval));
2329 } 2289 }
2330 2290
2331 return rval; 2291 return rval;
@@ -2345,7 +2305,7 @@ qla24xx_abort_command(srb_t *sp)
2345 struct qla_hw_data *ha = vha->hw; 2305 struct qla_hw_data *ha = vha->hw;
2346 struct req_que *req = vha->req; 2306 struct req_que *req = vha->req;
2347 2307
2348 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2308 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
2349 2309
2350 spin_lock_irqsave(&ha->hardware_lock, flags); 2310 spin_lock_irqsave(&ha->hardware_lock, flags);
2351 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2311 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2360,8 +2320,8 @@ qla24xx_abort_command(srb_t *sp)
2360 2320
2361 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 2321 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2362 if (abt == NULL) { 2322 if (abt == NULL) {
2363 DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n", 2323 ql_log(ql_log_warn, vha, 0x108d,
2364 __func__, vha->host_no)); 2324 "Failed to allocate abort IOCB.\n");
2365 return QLA_MEMORY_ALLOC_FAILED; 2325 return QLA_MEMORY_ALLOC_FAILED;
2366 } 2326 }
2367 memset(abt, 0, sizeof(struct abort_entry_24xx)); 2327 memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2380,20 +2340,20 @@ qla24xx_abort_command(srb_t *sp)
2380 2340
2381 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 2341 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2382 if (rval != QLA_SUCCESS) { 2342 if (rval != QLA_SUCCESS) {
2383 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", 2343 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2384 __func__, vha->host_no, rval)); 2344 "Failed to issue IOCB (%x).\n", rval);
2385 } else if (abt->entry_status != 0) { 2345 } else if (abt->entry_status != 0) {
2386 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2346 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2387 "-- error status (%x).\n", __func__, vha->host_no, 2347 "Failed to complete IOCB -- error status (%x).\n",
2388 abt->entry_status)); 2348 abt->entry_status);
2389 rval = QLA_FUNCTION_FAILED; 2349 rval = QLA_FUNCTION_FAILED;
2390 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { 2350 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2391 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2351 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2392 "-- completion status (%x).\n", __func__, vha->host_no, 2352 "Failed to complete IOCB -- completion status (%x).\n",
2393 le16_to_cpu(abt->nport_handle))); 2353 le16_to_cpu(abt->nport_handle));
2394 rval = QLA_FUNCTION_FAILED; 2354 rval = QLA_FUNCTION_FAILED;
2395 } else { 2355 } else {
2396 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2356 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
2397 } 2357 }
2398 2358
2399 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2359 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2421,19 +2381,20 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2421 struct req_que *req; 2381 struct req_que *req;
2422 struct rsp_que *rsp; 2382 struct rsp_que *rsp;
2423 2383
2424 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
2425
2426 vha = fcport->vha; 2384 vha = fcport->vha;
2427 ha = vha->hw; 2385 ha = vha->hw;
2428 req = vha->req; 2386 req = vha->req;
2387
2388 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
2389
2429 if (ha->flags.cpu_affinity_enabled) 2390 if (ha->flags.cpu_affinity_enabled)
2430 rsp = ha->rsp_q_map[tag + 1]; 2391 rsp = ha->rsp_q_map[tag + 1];
2431 else 2392 else
2432 rsp = req->rsp; 2393 rsp = req->rsp;
2433 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2394 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2434 if (tsk == NULL) { 2395 if (tsk == NULL) {
2435 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2396 ql_log(ql_log_warn, vha, 0x1093,
2436 "IOCB.\n", __func__, vha->host_no)); 2397 "Failed to allocate task management IOCB.\n");
2437 return QLA_MEMORY_ALLOC_FAILED; 2398 return QLA_MEMORY_ALLOC_FAILED;
2438 } 2399 }
2439 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd)); 2400 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2457,30 +2418,30 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2457 sts = &tsk->p.sts; 2418 sts = &tsk->p.sts;
2458 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 2419 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2459 if (rval != QLA_SUCCESS) { 2420 if (rval != QLA_SUCCESS) {
2460 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " 2421 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2461 "(%x).\n", __func__, vha->host_no, name, rval)); 2422 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2462 } else if (sts->entry_status != 0) { 2423 } else if (sts->entry_status != 0) {
2463 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2424 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2464 "-- error status (%x).\n", __func__, vha->host_no, 2425 "Failed to complete IOCB -- error status (%x).\n",
2465 sts->entry_status)); 2426 sts->entry_status);
2466 rval = QLA_FUNCTION_FAILED; 2427 rval = QLA_FUNCTION_FAILED;
2467 } else if (sts->comp_status != 2428 } else if (sts->comp_status !=
2468 __constant_cpu_to_le16(CS_COMPLETE)) { 2429 __constant_cpu_to_le16(CS_COMPLETE)) {
2469 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2430 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2470 "-- completion status (%x).\n", __func__, 2431 "Failed to complete IOCB -- completion status (%x).\n",
2471 vha->host_no, le16_to_cpu(sts->comp_status))); 2432 le16_to_cpu(sts->comp_status));
2472 rval = QLA_FUNCTION_FAILED; 2433 rval = QLA_FUNCTION_FAILED;
2473 } else if (le16_to_cpu(sts->scsi_status) & 2434 } else if (le16_to_cpu(sts->scsi_status) &
2474 SS_RESPONSE_INFO_LEN_VALID) { 2435 SS_RESPONSE_INFO_LEN_VALID) {
2475 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2436 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2476 DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent " 2437 ql_dbg(ql_dbg_mbx, vha, 0x1097,
2477 "data length -- not enough response info (%d).\n", 2438 "Ignoring inconsistent data length -- not enough "
2478 __func__, vha->host_no, 2439 "response info (%d).\n",
2479 le32_to_cpu(sts->rsp_data_len))); 2440 le32_to_cpu(sts->rsp_data_len));
2480 } else if (sts->data[3]) { 2441 } else if (sts->data[3]) {
2481 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2442 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2482 "-- response (%x).\n", __func__, 2443 "Failed to complete IOCB -- response (%x).\n",
2483 vha->host_no, sts->data[3])); 2444 sts->data[3]);
2484 rval = QLA_FUNCTION_FAILED; 2445 rval = QLA_FUNCTION_FAILED;
2485 } 2446 }
2486 } 2447 }
@@ -2489,10 +2450,10 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2489 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 2450 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2490 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); 2451 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2491 if (rval2 != QLA_SUCCESS) { 2452 if (rval2 != QLA_SUCCESS) {
2492 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 2453 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2493 "(%x).\n", __func__, vha->host_no, rval2)); 2454 "Failed to issue marker IOCB (%x).\n", rval2);
2494 } else { 2455 } else {
2495 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2456 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
2496 } 2457 }
2497 2458
2498 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 2459 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2533,7 +2494,7 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2533 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 2494 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2534 return QLA_FUNCTION_FAILED; 2495 return QLA_FUNCTION_FAILED;
2535 2496
2536 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2497 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
2537 2498
2538 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 2499 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2539 mcp->out_mb = MBX_0; 2500 mcp->out_mb = MBX_0;
@@ -2543,10 +2504,9 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2543 rval = qla2x00_mailbox_command(vha, mcp); 2504 rval = qla2x00_mailbox_command(vha, mcp);
2544 2505
2545 if (rval != QLA_SUCCESS) { 2506 if (rval != QLA_SUCCESS) {
2546 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2507 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2547 vha->host_no, rval));
2548 } else { 2508 } else {
2549 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2509 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
2550 } 2510 }
2551 2511
2552 return rval; 2512 return rval;
@@ -2566,7 +2526,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2566 mbx_cmd_t mc; 2526 mbx_cmd_t mc;
2567 mbx_cmd_t *mcp = &mc; 2527 mbx_cmd_t *mcp = &mc;
2568 2528
2569 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2529 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
2570 2530
2571 mcp->mb[0] = MBC_SERDES_PARAMS; 2531 mcp->mb[0] = MBC_SERDES_PARAMS;
2572 mcp->mb[1] = BIT_0; 2532 mcp->mb[1] = BIT_0;
@@ -2581,11 +2541,11 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2581 2541
2582 if (rval != QLA_SUCCESS) { 2542 if (rval != QLA_SUCCESS) {
2583 /*EMPTY*/ 2543 /*EMPTY*/
2584 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 2544 ql_dbg(ql_dbg_mbx, vha, 0x109f,
2585 vha->host_no, rval, mcp->mb[0])); 2545 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2586 } else { 2546 } else {
2587 /*EMPTY*/ 2547 /*EMPTY*/
2588 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2548 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
2589 } 2549 }
2590 2550
2591 return rval; 2551 return rval;
@@ -2601,7 +2561,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2601 if (!IS_FWI2_CAPABLE(vha->hw)) 2561 if (!IS_FWI2_CAPABLE(vha->hw))
2602 return QLA_FUNCTION_FAILED; 2562 return QLA_FUNCTION_FAILED;
2603 2563
2604 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2564 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
2605 2565
2606 mcp->mb[0] = MBC_STOP_FIRMWARE; 2566 mcp->mb[0] = MBC_STOP_FIRMWARE;
2607 mcp->out_mb = MBX_0; 2567 mcp->out_mb = MBX_0;
@@ -2611,12 +2571,11 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2611 rval = qla2x00_mailbox_command(vha, mcp); 2571 rval = qla2x00_mailbox_command(vha, mcp);
2612 2572
2613 if (rval != QLA_SUCCESS) { 2573 if (rval != QLA_SUCCESS) {
2614 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2574 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
2615 vha->host_no, rval));
2616 if (mcp->mb[0] == MBS_INVALID_COMMAND) 2575 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2617 rval = QLA_INVALID_COMMAND; 2576 rval = QLA_INVALID_COMMAND;
2618 } else { 2577 } else {
2619 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2578 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
2620 } 2579 }
2621 2580
2622 return rval; 2581 return rval;
@@ -2630,14 +2589,14 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2630 mbx_cmd_t mc; 2589 mbx_cmd_t mc;
2631 mbx_cmd_t *mcp = &mc; 2590 mbx_cmd_t *mcp = &mc;
2632 2591
2592 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
2593
2633 if (!IS_FWI2_CAPABLE(vha->hw)) 2594 if (!IS_FWI2_CAPABLE(vha->hw))
2634 return QLA_FUNCTION_FAILED; 2595 return QLA_FUNCTION_FAILED;
2635 2596
2636 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2597 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2637 return QLA_FUNCTION_FAILED; 2598 return QLA_FUNCTION_FAILED;
2638 2599
2639 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2640
2641 mcp->mb[0] = MBC_TRACE_CONTROL; 2600 mcp->mb[0] = MBC_TRACE_CONTROL;
2642 mcp->mb[1] = TC_EFT_ENABLE; 2601 mcp->mb[1] = TC_EFT_ENABLE;
2643 mcp->mb[2] = LSW(eft_dma); 2602 mcp->mb[2] = LSW(eft_dma);
@@ -2652,10 +2611,11 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2652 mcp->flags = 0; 2611 mcp->flags = 0;
2653 rval = qla2x00_mailbox_command(vha, mcp); 2612 rval = qla2x00_mailbox_command(vha, mcp);
2654 if (rval != QLA_SUCCESS) { 2613 if (rval != QLA_SUCCESS) {
2655 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2614 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
2656 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2615 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2616 rval, mcp->mb[0], mcp->mb[1]);
2657 } else { 2617 } else {
2658 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2618 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
2659 } 2619 }
2660 2620
2661 return rval; 2621 return rval;
@@ -2668,14 +2628,14 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2668 mbx_cmd_t mc; 2628 mbx_cmd_t mc;
2669 mbx_cmd_t *mcp = &mc; 2629 mbx_cmd_t *mcp = &mc;
2670 2630
2631 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
2632
2671 if (!IS_FWI2_CAPABLE(vha->hw)) 2633 if (!IS_FWI2_CAPABLE(vha->hw))
2672 return QLA_FUNCTION_FAILED; 2634 return QLA_FUNCTION_FAILED;
2673 2635
2674 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2636 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2675 return QLA_FUNCTION_FAILED; 2637 return QLA_FUNCTION_FAILED;
2676 2638
2677 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2678
2679 mcp->mb[0] = MBC_TRACE_CONTROL; 2639 mcp->mb[0] = MBC_TRACE_CONTROL;
2680 mcp->mb[1] = TC_EFT_DISABLE; 2640 mcp->mb[1] = TC_EFT_DISABLE;
2681 mcp->out_mb = MBX_1|MBX_0; 2641 mcp->out_mb = MBX_1|MBX_0;
@@ -2684,10 +2644,11 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2684 mcp->flags = 0; 2644 mcp->flags = 0;
2685 rval = qla2x00_mailbox_command(vha, mcp); 2645 rval = qla2x00_mailbox_command(vha, mcp);
2686 if (rval != QLA_SUCCESS) { 2646 if (rval != QLA_SUCCESS) {
2687 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2647 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
2688 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2648 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2649 rval, mcp->mb[0], mcp->mb[1]);
2689 } else { 2650 } else {
2690 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2651 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
2691 } 2652 }
2692 2653
2693 return rval; 2654 return rval;
@@ -2701,14 +2662,14 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2701 mbx_cmd_t mc; 2662 mbx_cmd_t mc;
2702 mbx_cmd_t *mcp = &mc; 2663 mbx_cmd_t *mcp = &mc;
2703 2664
2665 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
2666
2704 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) 2667 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2705 return QLA_FUNCTION_FAILED; 2668 return QLA_FUNCTION_FAILED;
2706 2669
2707 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2670 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2708 return QLA_FUNCTION_FAILED; 2671 return QLA_FUNCTION_FAILED;
2709 2672
2710 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2711
2712 mcp->mb[0] = MBC_TRACE_CONTROL; 2673 mcp->mb[0] = MBC_TRACE_CONTROL;
2713 mcp->mb[1] = TC_FCE_ENABLE; 2674 mcp->mb[1] = TC_FCE_ENABLE;
2714 mcp->mb[2] = LSW(fce_dma); 2675 mcp->mb[2] = LSW(fce_dma);
@@ -2727,10 +2688,11 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2727 mcp->flags = 0; 2688 mcp->flags = 0;
2728 rval = qla2x00_mailbox_command(vha, mcp); 2689 rval = qla2x00_mailbox_command(vha, mcp);
2729 if (rval != QLA_SUCCESS) { 2690 if (rval != QLA_SUCCESS) {
2730 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2691 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
2731 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2692 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2693 rval, mcp->mb[0], mcp->mb[1]);
2732 } else { 2694 } else {
2733 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2695 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
2734 2696
2735 if (mb) 2697 if (mb)
2736 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 2698 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2748,14 +2710,14 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2748 mbx_cmd_t mc; 2710 mbx_cmd_t mc;
2749 mbx_cmd_t *mcp = &mc; 2711 mbx_cmd_t *mcp = &mc;
2750 2712
2713 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
2714
2751 if (!IS_FWI2_CAPABLE(vha->hw)) 2715 if (!IS_FWI2_CAPABLE(vha->hw))
2752 return QLA_FUNCTION_FAILED; 2716 return QLA_FUNCTION_FAILED;
2753 2717
2754 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2718 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2755 return QLA_FUNCTION_FAILED; 2719 return QLA_FUNCTION_FAILED;
2756 2720
2757 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2758
2759 mcp->mb[0] = MBC_TRACE_CONTROL; 2721 mcp->mb[0] = MBC_TRACE_CONTROL;
2760 mcp->mb[1] = TC_FCE_DISABLE; 2722 mcp->mb[1] = TC_FCE_DISABLE;
2761 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 2723 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
@@ -2766,10 +2728,11 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2766 mcp->flags = 0; 2728 mcp->flags = 0;
2767 rval = qla2x00_mailbox_command(vha, mcp); 2729 rval = qla2x00_mailbox_command(vha, mcp);
2768 if (rval != QLA_SUCCESS) { 2730 if (rval != QLA_SUCCESS) {
2769 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2731 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
2770 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2732 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2733 rval, mcp->mb[0], mcp->mb[1]);
2771 } else { 2734 } else {
2772 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2735 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
2773 2736
2774 if (wr) 2737 if (wr)
2775 *wr = (uint64_t) mcp->mb[5] << 48 | 2738 *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2794,11 +2757,11 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2794 mbx_cmd_t mc; 2757 mbx_cmd_t mc;
2795 mbx_cmd_t *mcp = &mc; 2758 mbx_cmd_t *mcp = &mc;
2796 2759
2760 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
2761
2797 if (!IS_IIDMA_CAPABLE(vha->hw)) 2762 if (!IS_IIDMA_CAPABLE(vha->hw))
2798 return QLA_FUNCTION_FAILED; 2763 return QLA_FUNCTION_FAILED;
2799 2764
2800 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2801
2802 mcp->mb[0] = MBC_PORT_PARAMS; 2765 mcp->mb[0] = MBC_PORT_PARAMS;
2803 mcp->mb[1] = loop_id; 2766 mcp->mb[1] = loop_id;
2804 mcp->mb[2] = mcp->mb[3] = 0; 2767 mcp->mb[2] = mcp->mb[3] = 0;
@@ -2817,10 +2780,9 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2817 } 2780 }
2818 2781
2819 if (rval != QLA_SUCCESS) { 2782 if (rval != QLA_SUCCESS) {
2820 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2783 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2821 vha->host_no, rval));
2822 } else { 2784 } else {
2823 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2785 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
2824 if (port_speed) 2786 if (port_speed)
2825 *port_speed = mcp->mb[3]; 2787 *port_speed = mcp->mb[3];
2826 } 2788 }
@@ -2836,11 +2798,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2836 mbx_cmd_t mc; 2798 mbx_cmd_t mc;
2837 mbx_cmd_t *mcp = &mc; 2799 mbx_cmd_t *mcp = &mc;
2838 2800
2801 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
2802
2839 if (!IS_IIDMA_CAPABLE(vha->hw)) 2803 if (!IS_IIDMA_CAPABLE(vha->hw))
2840 return QLA_FUNCTION_FAILED; 2804 return QLA_FUNCTION_FAILED;
2841 2805
2842 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2843
2844 mcp->mb[0] = MBC_PORT_PARAMS; 2806 mcp->mb[0] = MBC_PORT_PARAMS;
2845 mcp->mb[1] = loop_id; 2807 mcp->mb[1] = loop_id;
2846 mcp->mb[2] = BIT_0; 2808 mcp->mb[2] = BIT_0;
@@ -2863,10 +2825,9 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2863 } 2825 }
2864 2826
2865 if (rval != QLA_SUCCESS) { 2827 if (rval != QLA_SUCCESS) {
2866 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2828 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
2867 vha->host_no, rval));
2868 } else { 2829 } else {
2869 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2830 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
2870 } 2831 }
2871 2832
2872 return rval; 2833 return rval;
@@ -2882,33 +2843,36 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2882 scsi_qla_host_t *vp; 2843 scsi_qla_host_t *vp;
2883 unsigned long flags; 2844 unsigned long flags;
2884 2845
2846 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
2847
2885 if (rptid_entry->entry_status != 0) 2848 if (rptid_entry->entry_status != 0)
2886 return; 2849 return;
2887 2850
2888 if (rptid_entry->format == 0) { 2851 if (rptid_entry->format == 0) {
2889 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," 2852 ql_dbg(ql_dbg_mbx, vha, 0x10b7,
2890 " number of VPs acquired %d\n", __func__, vha->host_no, 2853 "Format 0 : Number of VPs setup %d, number of "
2891 MSB(le16_to_cpu(rptid_entry->vp_count)), 2854 "VPs acquired %d.\n",
2892 LSB(le16_to_cpu(rptid_entry->vp_count)))); 2855 MSB(le16_to_cpu(rptid_entry->vp_count)),
2893 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__, 2856 LSB(le16_to_cpu(rptid_entry->vp_count)));
2894 rptid_entry->port_id[2], rptid_entry->port_id[1], 2857 ql_dbg(ql_dbg_mbx, vha, 0x10b8,
2895 rptid_entry->port_id[0])); 2858 "Primary port id %02x%02x%02x.\n",
2859 rptid_entry->port_id[2], rptid_entry->port_id[1],
2860 rptid_entry->port_id[0]);
2896 } else if (rptid_entry->format == 1) { 2861 } else if (rptid_entry->format == 1) {
2897 vp_idx = LSB(stat); 2862 vp_idx = LSB(stat);
2898 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled " 2863 ql_dbg(ql_dbg_mbx, vha, 0x10b9,
2899 "- status %d - " 2864 "Format 1: VP[%d] enabled - status %d - with "
2900 "with port id %02x%02x%02x\n", __func__, vha->host_no, 2865 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2901 vp_idx, MSB(stat),
2902 rptid_entry->port_id[2], rptid_entry->port_id[1], 2866 rptid_entry->port_id[2], rptid_entry->port_id[1],
2903 rptid_entry->port_id[0])); 2867 rptid_entry->port_id[0]);
2904 2868
2905 vp = vha; 2869 vp = vha;
2906 if (vp_idx == 0 && (MSB(stat) != 1)) 2870 if (vp_idx == 0 && (MSB(stat) != 1))
2907 goto reg_needed; 2871 goto reg_needed;
2908 2872
2909 if (MSB(stat) == 1) { 2873 if (MSB(stat) == 1) {
2910 DEBUG2(printk("scsi(%ld): Could not acquire ID for " 2874 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
2911 "VP[%d].\n", vha->host_no, vp_idx)); 2875 "Could not acquire ID for VP[%d].\n", vp_idx);
2912 return; 2876 return;
2913 } 2877 }
2914 2878
@@ -2963,10 +2927,12 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2963 2927
2964 /* This can be called by the parent */ 2928 /* This can be called by the parent */
2965 2929
2930 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
2931
2966 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 2932 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2967 if (!vpmod) { 2933 if (!vpmod) {
2968 DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP " 2934 ql_log(ql_log_warn, vha, 0x10bc,
2969 "IOCB.\n", __func__, vha->host_no)); 2935 "Failed to allocate modify VP IOCB.\n");
2970 return QLA_MEMORY_ALLOC_FAILED; 2936 return QLA_MEMORY_ALLOC_FAILED;
2971 } 2937 }
2972 2938
@@ -2983,22 +2949,21 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2983 2949
2984 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 2950 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
2985 if (rval != QLA_SUCCESS) { 2951 if (rval != QLA_SUCCESS) {
2986 DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB" 2952 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
2987 "(%x).\n", __func__, base_vha->host_no, rval)); 2953 "Failed to issue VP config IOCB (%x).\n", rval);
2988 } else if (vpmod->comp_status != 0) { 2954 } else if (vpmod->comp_status != 0) {
2989 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2955 ql_dbg(ql_dbg_mbx, vha, 0x10be,
2990 "-- error status (%x).\n", __func__, base_vha->host_no, 2956 "Failed to complete IOCB -- error status (%x).\n",
2991 vpmod->comp_status)); 2957 vpmod->comp_status);
2992 rval = QLA_FUNCTION_FAILED; 2958 rval = QLA_FUNCTION_FAILED;
2993 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 2959 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2994 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2960 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
2995 "-- completion status (%x).\n", __func__, base_vha->host_no, 2961 "Failed to complete IOCB -- completion status (%x).\n",
2996 le16_to_cpu(vpmod->comp_status))); 2962 le16_to_cpu(vpmod->comp_status));
2997 rval = QLA_FUNCTION_FAILED; 2963 rval = QLA_FUNCTION_FAILED;
2998 } else { 2964 } else {
2999 /* EMPTY */ 2965 /* EMPTY */
3000 DEBUG11(printk("%s(%ld): done.\n", __func__, 2966 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
3001 base_vha->host_no));
3002 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 2967 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3003 } 2968 }
3004 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 2969 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3032,17 +2997,16 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3032 int vp_index = vha->vp_idx; 2997 int vp_index = vha->vp_idx;
3033 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2998 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3034 2999
3035 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__, 3000 ql_dbg(ql_dbg_mbx, vha, 0x10c1,
3036 vha->host_no, vp_index)); 3001 "Entered %s enabling index %d.\n", __func__, vp_index);
3037 3002
3038 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 3003 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3039 return QLA_PARAMETER_ERROR; 3004 return QLA_PARAMETER_ERROR;
3040 3005
3041 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); 3006 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3042 if (!vce) { 3007 if (!vce) {
3043 DEBUG2_3(printk("%s(%ld): " 3008 ql_log(ql_log_warn, vha, 0x10c2,
3044 "failed to allocate VP Control IOCB.\n", __func__, 3009 "Failed to allocate VP control IOCB.\n");
3045 base_vha->host_no));
3046 return QLA_MEMORY_ALLOC_FAILED; 3010 return QLA_MEMORY_ALLOC_FAILED;
3047 } 3011 }
3048 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); 3012 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -3063,28 +3027,20 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3063 3027
3064 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0); 3028 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3065 if (rval != QLA_SUCCESS) { 3029 if (rval != QLA_SUCCESS) {
3066 DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB" 3030 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3067 "(%x).\n", __func__, base_vha->host_no, rval)); 3031 "Failed to issue VP control IOCB (%x).\n", rval);
3068 printk("%s(%ld): failed to issue VP control IOCB"
3069 "(%x).\n", __func__, base_vha->host_no, rval);
3070 } else if (vce->entry_status != 0) { 3032 } else if (vce->entry_status != 0) {
3071 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 3033 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3072 "-- error status (%x).\n", __func__, base_vha->host_no, 3034 "Failed to complete IOCB -- error status (%x).\n",
3073 vce->entry_status));
3074 printk("%s(%ld): failed to complete IOCB "
3075 "-- error status (%x).\n", __func__, base_vha->host_no,
3076 vce->entry_status); 3035 vce->entry_status);
3077 rval = QLA_FUNCTION_FAILED; 3036 rval = QLA_FUNCTION_FAILED;
3078 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 3037 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3079 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 3038 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3080 "-- completion status (%x).\n", __func__, base_vha->host_no, 3039 "Failed to complet IOCB -- completion status (%x).\n",
3081 le16_to_cpu(vce->comp_status)));
3082 printk("%s(%ld): failed to complete IOCB "
3083 "-- completion status (%x).\n", __func__, base_vha->host_no,
3084 le16_to_cpu(vce->comp_status)); 3040 le16_to_cpu(vce->comp_status));
3085 rval = QLA_FUNCTION_FAILED; 3041 rval = QLA_FUNCTION_FAILED;
3086 } else { 3042 } else {
3087 DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no)); 3043 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
3088 } 3044 }
3089 3045
3090 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 3046 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3121,6 +3077,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3121 mbx_cmd_t mc; 3077 mbx_cmd_t mc;
3122 mbx_cmd_t *mcp = &mc; 3078 mbx_cmd_t *mcp = &mc;
3123 3079
3080 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
3081
3124 /* 3082 /*
3125 * This command is implicitly executed by firmware during login for the 3083 * This command is implicitly executed by firmware during login for the
3126 * physical hosts 3084 * physical hosts
@@ -3155,7 +3113,7 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3155 mbx_cmd_t mc; 3113 mbx_cmd_t mc;
3156 mbx_cmd_t *mcp = &mc; 3114 mbx_cmd_t *mcp = &mc;
3157 3115
3158 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3116 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
3159 3117
3160 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 3118 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3161 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 3119 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3186,10 +3144,10 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3186 rval = qla2x00_mailbox_command(vha, mcp); 3144 rval = qla2x00_mailbox_command(vha, mcp);
3187 3145
3188 if (rval != QLA_SUCCESS) { 3146 if (rval != QLA_SUCCESS) {
3189 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3147 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3190 vha->host_no, rval, mcp->mb[0])); 3148 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3191 } else { 3149 } else {
3192 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3150 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
3193 } 3151 }
3194 3152
3195 return rval; 3153 return rval;
@@ -3214,12 +3172,10 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3214 unsigned long flags; 3172 unsigned long flags;
3215 struct qla_hw_data *ha = vha->hw; 3173 struct qla_hw_data *ha = vha->hw;
3216 3174
3217 DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3175 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
3218 3176
3219 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 3177 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3220 if (mn == NULL) { 3178 if (mn == NULL) {
3221 DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
3222 "IOCB.\n", __func__, vha->host_no));
3223 return QLA_MEMORY_ALLOC_FAILED; 3179 return QLA_MEMORY_ALLOC_FAILED;
3224 } 3180 }
3225 3181
@@ -3237,43 +3193,43 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3237 mn->p.req.entry_count = 1; 3193 mn->p.req.entry_count = 1;
3238 mn->p.req.options = cpu_to_le16(options); 3194 mn->p.req.options = cpu_to_le16(options);
3239 3195
3240 DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__, 3196 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3241 vha->host_no)); 3197 "Dump of Verify Request.\n");
3242 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3198 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3243 sizeof(*mn))); 3199 (uint8_t *)mn, sizeof(*mn));
3244 3200
3245 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 3201 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3246 if (rval != QLA_SUCCESS) { 3202 if (rval != QLA_SUCCESS) {
3247 DEBUG2_16(printk("%s(%ld): failed to issue Verify " 3203 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3248 "IOCB (%x).\n", __func__, vha->host_no, rval)); 3204 "Failed to issue verify IOCB (%x).\n", rval);
3249 goto verify_done; 3205 goto verify_done;
3250 } 3206 }
3251 3207
3252 DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__, 3208 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3253 vha->host_no)); 3209 "Dump of Verify Response.\n");
3254 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3210 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3255 sizeof(*mn))); 3211 (uint8_t *)mn, sizeof(*mn));
3256 3212
3257 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 3213 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3258 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 3214 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3259 le16_to_cpu(mn->p.rsp.failure_code) : 0; 3215 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3260 DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__, 3216 ql_dbg(ql_dbg_mbx, vha, 0x10ce,
3261 vha->host_no, status[0], status[1])); 3217 "cs=%x fc=%x.\n", status[0], status[1]);
3262 3218
3263 if (status[0] != CS_COMPLETE) { 3219 if (status[0] != CS_COMPLETE) {
3264 rval = QLA_FUNCTION_FAILED; 3220 rval = QLA_FUNCTION_FAILED;
3265 if (!(options & VCO_DONT_UPDATE_FW)) { 3221 if (!(options & VCO_DONT_UPDATE_FW)) {
3266 DEBUG2_16(printk("%s(%ld): Firmware update " 3222 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3267 "failed. Retrying without update " 3223 "Firmware update failed. Retrying "
3268 "firmware.\n", __func__, vha->host_no)); 3224 "without update firmware.\n");
3269 options |= VCO_DONT_UPDATE_FW; 3225 options |= VCO_DONT_UPDATE_FW;
3270 options &= ~VCO_FORCE_UPDATE; 3226 options &= ~VCO_FORCE_UPDATE;
3271 retry = 1; 3227 retry = 1;
3272 } 3228 }
3273 } else { 3229 } else {
3274 DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n", 3230 ql_dbg(ql_dbg_mbx, vha, 0x10d0,
3275 __func__, vha->host_no, 3231 "Firmware updated to %x.\n",
3276 le32_to_cpu(mn->p.rsp.fw_ver))); 3232 le32_to_cpu(mn->p.rsp.fw_ver));
3277 3233
3278 /* NOTE: we only update OP firmware. */ 3234 /* NOTE: we only update OP firmware. */
3279 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 3235 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
@@ -3288,10 +3244,9 @@ verify_done:
3288 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 3244 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3289 3245
3290 if (rval != QLA_SUCCESS) { 3246 if (rval != QLA_SUCCESS) {
3291 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__, 3247 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
3292 vha->host_no, rval));
3293 } else { 3248 } else {
3294 DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3249 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
3295 } 3250 }
3296 3251
3297 return rval; 3252 return rval;
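Note how the IOCB dumps in this hunk gate on the combined ql_dbg_mbx + ql_dbg_buffer mask: the header line and the hex dump only appear when both debug bits are enabled. A small sketch of that pairing, assuming only the ql_dbg()/ql_dump_buffer() signatures visible above (the 0x112x message IDs are illustrative placeholders, not IDs assigned by this patch):

static void example_dump_iocb(scsi_qla_host_t *vha, void *pkt, uint32_t len)
{
	/* Header line printed under the same mask as the dump itself. */
	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1120,
	    "Dump of example IOCB.\n");
	/* Hex-dump len bytes of the packet when the mask bits are set. */
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1121,
	    (uint8_t *)pkt, len);
}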
@@ -3307,6 +3262,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3307 struct device_reg_25xxmq __iomem *reg; 3262 struct device_reg_25xxmq __iomem *reg;
3308 struct qla_hw_data *ha = vha->hw; 3263 struct qla_hw_data *ha = vha->hw;
3309 3264
3265 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
3266
3310 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3267 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3311 mcp->mb[1] = req->options; 3268 mcp->mb[1] = req->options;
3312 mcp->mb[2] = MSW(LSD(req->dma)); 3269 mcp->mb[2] = MSW(LSD(req->dma));
@@ -3344,9 +3301,13 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3344 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3345 3302
3346 rval = qla2x00_mailbox_command(vha, mcp); 3303 rval = qla2x00_mailbox_command(vha, mcp);
3347 if (rval != QLA_SUCCESS) 3304 if (rval != QLA_SUCCESS) {
3348 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n", 3305 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3349 __func__, vha->host_no, rval, mcp->mb[0])); 3306 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3307 } else {
3308 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
3309 }
3310
3350 return rval; 3311 return rval;
3351} 3312}
3352 3313
@@ -3360,6 +3321,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3360 struct device_reg_25xxmq __iomem *reg; 3321 struct device_reg_25xxmq __iomem *reg;
3361 struct qla_hw_data *ha = vha->hw; 3322 struct qla_hw_data *ha = vha->hw;
3362 3323
3324 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
3325
3363 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3326 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3364 mcp->mb[1] = rsp->options; 3327 mcp->mb[1] = rsp->options;
3365 mcp->mb[2] = MSW(LSD(rsp->dma)); 3328 mcp->mb[2] = MSW(LSD(rsp->dma));
@@ -3393,10 +3356,13 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3393 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3356 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3394 3357
3395 rval = qla2x00_mailbox_command(vha, mcp); 3358 rval = qla2x00_mailbox_command(vha, mcp);
3396 if (rval != QLA_SUCCESS) 3359 if (rval != QLA_SUCCESS) {
3397 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x " 3360 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3398 "mb0=%x.\n", __func__, 3361 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3399 vha->host_no, rval, mcp->mb[0])); 3362 } else {
3363 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
3364 }
3365
3400 return rval; 3366 return rval;
3401} 3367}
3402 3368
@@ -3407,7 +3373,7 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3407 mbx_cmd_t mc; 3373 mbx_cmd_t mc;
3408 mbx_cmd_t *mcp = &mc; 3374 mbx_cmd_t *mcp = &mc;
3409 3375
3410 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3376 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
3411 3377
3412 mcp->mb[0] = MBC_IDC_ACK; 3378 mcp->mb[0] = MBC_IDC_ACK;
3413 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 3379 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3418,10 +3384,10 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3418 rval = qla2x00_mailbox_command(vha, mcp); 3384 rval = qla2x00_mailbox_command(vha, mcp);
3419 3385
3420 if (rval != QLA_SUCCESS) { 3386 if (rval != QLA_SUCCESS) {
3421 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3387 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3422 vha->host_no, rval, mcp->mb[0])); 3388 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3423 } else { 3389 } else {
3424 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3390 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
3425 } 3391 }
3426 3392
3427 return rval; 3393 return rval;
@@ -3434,11 +3400,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3434 mbx_cmd_t mc; 3400 mbx_cmd_t mc;
3435 mbx_cmd_t *mcp = &mc; 3401 mbx_cmd_t *mcp = &mc;
3436 3402
3403 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
3404
3437 if (!IS_QLA81XX(vha->hw)) 3405 if (!IS_QLA81XX(vha->hw))
3438 return QLA_FUNCTION_FAILED; 3406 return QLA_FUNCTION_FAILED;
3439 3407
3440 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3441
3442 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3408 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3443 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 3409 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3444 mcp->out_mb = MBX_1|MBX_0; 3410 mcp->out_mb = MBX_1|MBX_0;
@@ -3448,10 +3414,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3448 rval = qla2x00_mailbox_command(vha, mcp); 3414 rval = qla2x00_mailbox_command(vha, mcp);
3449 3415
3450 if (rval != QLA_SUCCESS) { 3416 if (rval != QLA_SUCCESS) {
3451 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 3417 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3452 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3418 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3419 rval, mcp->mb[0], mcp->mb[1]);
3453 } else { 3420 } else {
3454 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3421 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
3455 *sector_size = mcp->mb[1]; 3422 *sector_size = mcp->mb[1];
3456 } 3423 }
3457 3424
@@ -3468,7 +3435,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3468 if (!IS_QLA81XX(vha->hw)) 3435 if (!IS_QLA81XX(vha->hw))
3469 return QLA_FUNCTION_FAILED; 3436 return QLA_FUNCTION_FAILED;
3470 3437
3471 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3438 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
3472 3439
3473 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3440 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3474 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 3441 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3480,10 +3447,11 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3480 rval = qla2x00_mailbox_command(vha, mcp); 3447 rval = qla2x00_mailbox_command(vha, mcp);
3481 3448
3482 if (rval != QLA_SUCCESS) { 3449 if (rval != QLA_SUCCESS) {
3483 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 3450 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
3484 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3451 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3452 rval, mcp->mb[0], mcp->mb[1]);
3485 } else { 3453 } else {
3486 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3454 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
3487 } 3455 }
3488 3456
3489 return rval; 3457 return rval;
@@ -3499,7 +3467,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3499 if (!IS_QLA81XX(vha->hw)) 3467 if (!IS_QLA81XX(vha->hw))
3500 return QLA_FUNCTION_FAILED; 3468 return QLA_FUNCTION_FAILED;
3501 3469
3502 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3470 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
3503 3471
3504 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3472 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3505 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 3473 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3514,11 +3482,11 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3514 rval = qla2x00_mailbox_command(vha, mcp); 3482 rval = qla2x00_mailbox_command(vha, mcp);
3515 3483
3516 if (rval != QLA_SUCCESS) { 3484 if (rval != QLA_SUCCESS) {
3517 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 3485 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
3518 "mb[2]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], 3486 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3519 mcp->mb[1], mcp->mb[2])); 3487 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3520 } else { 3488 } else {
3521 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3489 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
3522 } 3490 }
3523 3491
3524 return rval; 3492 return rval;
@@ -3531,7 +3499,7 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3531 mbx_cmd_t mc; 3499 mbx_cmd_t mc;
3532 mbx_cmd_t *mcp = &mc; 3500 mbx_cmd_t *mcp = &mc;
3533 3501
3534 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3502 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
3535 3503
3536 mcp->mb[0] = MBC_RESTART_MPI_FW; 3504 mcp->mb[0] = MBC_RESTART_MPI_FW;
3537 mcp->out_mb = MBX_0; 3505 mcp->out_mb = MBX_0;
@@ -3541,10 +3509,11 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3541 rval = qla2x00_mailbox_command(vha, mcp); 3509 rval = qla2x00_mailbox_command(vha, mcp);
3542 3510
3543 if (rval != QLA_SUCCESS) { 3511 if (rval != QLA_SUCCESS) {
3544 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", 3512 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
3545 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3513 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3514 rval, mcp->mb[0], mcp->mb[1]);
3546 } else { 3515 } else {
3547 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3516 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
3548 } 3517 }
3549 3518
3550 return rval; 3519 return rval;
@@ -3559,11 +3528,11 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3559 mbx_cmd_t *mcp = &mc; 3528 mbx_cmd_t *mcp = &mc;
3560 struct qla_hw_data *ha = vha->hw; 3529 struct qla_hw_data *ha = vha->hw;
3561 3530
3531 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
3532
3562 if (!IS_FWI2_CAPABLE(ha)) 3533 if (!IS_FWI2_CAPABLE(ha))
3563 return QLA_FUNCTION_FAILED; 3534 return QLA_FUNCTION_FAILED;
3564 3535
3565 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3566
3567 if (len == 1) 3536 if (len == 1)
3568 opt |= BIT_0; 3537 opt |= BIT_0;
3569 3538
@@ -3586,10 +3555,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3586 *sfp = mcp->mb[1]; 3555 *sfp = mcp->mb[1];
3587 3556
3588 if (rval != QLA_SUCCESS) { 3557 if (rval != QLA_SUCCESS) {
3589 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3558 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3590 vha->host_no, rval, mcp->mb[0])); 3559 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3591 } else { 3560 } else {
3592 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3561 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
3593 } 3562 }
3594 3563
3595 return rval; 3564 return rval;
@@ -3604,11 +3573,11 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3604 mbx_cmd_t *mcp = &mc; 3573 mbx_cmd_t *mcp = &mc;
3605 struct qla_hw_data *ha = vha->hw; 3574 struct qla_hw_data *ha = vha->hw;
3606 3575
3576 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
3577
3607 if (!IS_FWI2_CAPABLE(ha)) 3578 if (!IS_FWI2_CAPABLE(ha))
3608 return QLA_FUNCTION_FAILED; 3579 return QLA_FUNCTION_FAILED;
3609 3580
3610 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3611
3612 if (len == 1) 3581 if (len == 1)
3613 opt |= BIT_0; 3582 opt |= BIT_0;
3614 3583
@@ -3631,10 +3600,10 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3631 rval = qla2x00_mailbox_command(vha, mcp); 3600 rval = qla2x00_mailbox_command(vha, mcp);
3632 3601
3633 if (rval != QLA_SUCCESS) { 3602 if (rval != QLA_SUCCESS) {
3634 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3603 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3635 vha->host_no, rval, mcp->mb[0])); 3604 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3636 } else { 3605 } else {
3637 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3606 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
3638 } 3607 }
3639 3608
3640 return rval; 3609 return rval;
@@ -3648,11 +3617,11 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3648 mbx_cmd_t mc; 3617 mbx_cmd_t mc;
3649 mbx_cmd_t *mcp = &mc; 3618 mbx_cmd_t *mcp = &mc;
3650 3619
3620 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
3621
3651 if (!IS_QLA8XXX_TYPE(vha->hw)) 3622 if (!IS_QLA8XXX_TYPE(vha->hw))
3652 return QLA_FUNCTION_FAILED; 3623 return QLA_FUNCTION_FAILED;
3653 3624
3654 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3655
3656 mcp->mb[0] = MBC_GET_XGMAC_STATS; 3625 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3657 mcp->mb[2] = MSW(stats_dma); 3626 mcp->mb[2] = MSW(stats_dma);
3658 mcp->mb[3] = LSW(stats_dma); 3627 mcp->mb[3] = LSW(stats_dma);
@@ -3666,11 +3635,12 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3666 rval = qla2x00_mailbox_command(vha, mcp); 3635 rval = qla2x00_mailbox_command(vha, mcp);
3667 3636
3668 if (rval != QLA_SUCCESS) { 3637 if (rval != QLA_SUCCESS) {
3669 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " 3638 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
3670 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, 3639 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3671 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 3640 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3672 } else { 3641 } else {
3673 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3642 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
3643
3674 3644
3675 *actual_size = mcp->mb[2] << 2; 3645 *actual_size = mcp->mb[2] << 2;
3676 } 3646 }
@@ -3686,11 +3656,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3686 mbx_cmd_t mc; 3656 mbx_cmd_t mc;
3687 mbx_cmd_t *mcp = &mc; 3657 mbx_cmd_t *mcp = &mc;
3688 3658
3659 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
3660
3689 if (!IS_QLA8XXX_TYPE(vha->hw)) 3661 if (!IS_QLA8XXX_TYPE(vha->hw))
3690 return QLA_FUNCTION_FAILED; 3662 return QLA_FUNCTION_FAILED;
3691 3663
3692 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3693
3694 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 3664 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3695 mcp->mb[1] = 0; 3665 mcp->mb[1] = 0;
3696 mcp->mb[2] = MSW(tlv_dma); 3666 mcp->mb[2] = MSW(tlv_dma);
@@ -3705,11 +3675,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3705 rval = qla2x00_mailbox_command(vha, mcp); 3675 rval = qla2x00_mailbox_command(vha, mcp);
3706 3676
3707 if (rval != QLA_SUCCESS) { 3677 if (rval != QLA_SUCCESS) {
3708 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " 3678 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
3709 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, 3679 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3710 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 3680 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3711 } else { 3681 } else {
3712 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3682 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
3713 } 3683 }
3714 3684
3715 return rval; 3685 return rval;
@@ -3722,11 +3692,11 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3722 mbx_cmd_t mc; 3692 mbx_cmd_t mc;
3723 mbx_cmd_t *mcp = &mc; 3693 mbx_cmd_t *mcp = &mc;
3724 3694
3695 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
3696
3725 if (!IS_FWI2_CAPABLE(vha->hw)) 3697 if (!IS_FWI2_CAPABLE(vha->hw))
3726 return QLA_FUNCTION_FAILED; 3698 return QLA_FUNCTION_FAILED;
3727 3699
3728 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3729
3730 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 3700 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3731 mcp->mb[1] = LSW(risc_addr); 3701 mcp->mb[1] = LSW(risc_addr);
3732 mcp->mb[8] = MSW(risc_addr); 3702 mcp->mb[8] = MSW(risc_addr);
@@ -3736,10 +3706,10 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3736 mcp->flags = 0; 3706 mcp->flags = 0;
3737 rval = qla2x00_mailbox_command(vha, mcp); 3707 rval = qla2x00_mailbox_command(vha, mcp);
3738 if (rval != QLA_SUCCESS) { 3708 if (rval != QLA_SUCCESS) {
3739 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3709 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3740 vha->host_no, rval, mcp->mb[0])); 3710 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3741 } else { 3711 } else {
3742 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3712 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
3743 *data = mcp->mb[3] << 16 | mcp->mb[2]; 3713 *data = mcp->mb[3] << 16 | mcp->mb[2];
3744 } 3714 }
3745 3715
@@ -3755,7 +3725,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3755 mbx_cmd_t *mcp = &mc; 3725 mbx_cmd_t *mcp = &mc;
3756 uint32_t iter_cnt = 0x1; 3726 uint32_t iter_cnt = 0x1;
3757 3727
3758 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); 3728 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
3759 3729
3760 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3730 memset(mcp->mb, 0 , sizeof(mcp->mb));
3761 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 3731 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3794,15 +3764,12 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3794 rval = qla2x00_mailbox_command(vha, mcp); 3764 rval = qla2x00_mailbox_command(vha, mcp);
3795 3765
3796 if (rval != QLA_SUCCESS) { 3766 if (rval != QLA_SUCCESS) {
3797 DEBUG2(printk(KERN_WARNING 3767 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
3798 "(%ld): failed=%x mb[0]=0x%x " 3768 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
3799 "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x " 3769 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3800 "mb[19]=0x%x.\n", 3770 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3801 vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3802 mcp->mb[3], mcp->mb[18], mcp->mb[19]));
3803 } else { 3771 } else {
3804 DEBUG2(printk(KERN_WARNING 3772 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
3805 "scsi(%ld): done.\n", vha->host_no));
3806 } 3773 }
3807 3774
3808 /* Copy mailbox information */ 3775 /* Copy mailbox information */
@@ -3819,7 +3786,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3819 mbx_cmd_t *mcp = &mc; 3786 mbx_cmd_t *mcp = &mc;
3820 struct qla_hw_data *ha = vha->hw; 3787 struct qla_hw_data *ha = vha->hw;
3821 3788
3822 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); 3789 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
3823 3790
3824 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3791 memset(mcp->mb, 0 , sizeof(mcp->mb));
3825 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 3792 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3858,12 +3825,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3858 rval = qla2x00_mailbox_command(vha, mcp); 3825 rval = qla2x00_mailbox_command(vha, mcp);
3859 3826
3860 if (rval != QLA_SUCCESS) { 3827 if (rval != QLA_SUCCESS) {
3861 DEBUG2(printk(KERN_WARNING 3828 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
3862 "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", 3829 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3863 vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3830 rval, mcp->mb[0], mcp->mb[1]);
3864 } else { 3831 } else {
3865 DEBUG2(printk(KERN_WARNING 3832 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
3866 "scsi(%ld): done.\n", vha->host_no));
3867 } 3833 }
3868 3834
3869 /* Copy mailbox information */ 3835 /* Copy mailbox information */
@@ -3872,14 +3838,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3872} 3838}
3873 3839
3874int 3840int
3875qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic) 3841qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3876{ 3842{
3877 int rval; 3843 int rval;
3878 mbx_cmd_t mc; 3844 mbx_cmd_t mc;
3879 mbx_cmd_t *mcp = &mc; 3845 mbx_cmd_t *mcp = &mc;
3880 3846
3881 DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__, 3847 ql_dbg(ql_dbg_mbx, vha, 0x10fd,
3882 ha->host_no, enable_diagnostic)); 3848 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3883 3849
3884 mcp->mb[0] = MBC_ISP84XX_RESET; 3850 mcp->mb[0] = MBC_ISP84XX_RESET;
3885 mcp->mb[1] = enable_diagnostic; 3851 mcp->mb[1] = enable_diagnostic;
@@ -3887,13 +3853,12 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
3887 mcp->in_mb = MBX_1|MBX_0; 3853 mcp->in_mb = MBX_1|MBX_0;
3888 mcp->tov = MBX_TOV_SECONDS; 3854 mcp->tov = MBX_TOV_SECONDS;
3889 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 3855 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3890 rval = qla2x00_mailbox_command(ha, mcp); 3856 rval = qla2x00_mailbox_command(vha, mcp);
3891 3857
3892 if (rval != QLA_SUCCESS) 3858 if (rval != QLA_SUCCESS)
3893 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no, 3859 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3894 rval));
3895 else 3860 else
3896 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3861 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
3897 3862
3898 return rval; 3863 return rval;
3899} 3864}
@@ -3905,11 +3870,11 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3905 mbx_cmd_t mc; 3870 mbx_cmd_t mc;
3906 mbx_cmd_t *mcp = &mc; 3871 mbx_cmd_t *mcp = &mc;
3907 3872
3873 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
3874
3908 if (!IS_FWI2_CAPABLE(vha->hw)) 3875 if (!IS_FWI2_CAPABLE(vha->hw))
3909 return QLA_FUNCTION_FAILED; 3876 return QLA_FUNCTION_FAILED;
3910 3877
3911 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3912
3913 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 3878 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3914 mcp->mb[1] = LSW(risc_addr); 3879 mcp->mb[1] = LSW(risc_addr);
3915 mcp->mb[2] = LSW(data); 3880 mcp->mb[2] = LSW(data);
@@ -3921,10 +3886,10 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3921 mcp->flags = 0; 3886 mcp->flags = 0;
3922 rval = qla2x00_mailbox_command(vha, mcp); 3887 rval = qla2x00_mailbox_command(vha, mcp);
3923 if (rval != QLA_SUCCESS) { 3888 if (rval != QLA_SUCCESS) {
3924 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3889 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3925 vha->host_no, rval, mcp->mb[0])); 3890 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3926 } else { 3891 } else {
3927 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3892 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
3928 } 3893 }
3929 3894
3930 return rval; 3895 return rval;
@@ -3941,8 +3906,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3941 3906
3942 rval = QLA_SUCCESS; 3907 rval = QLA_SUCCESS;
3943 3908
3944 DEBUG11(qla_printk(KERN_INFO, ha, 3909 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
3945 "%s(%ld): entered.\n", __func__, vha->host_no));
3946 3910
3947 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 3911 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
3948 3912
@@ -3982,11 +3946,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3982 rval = QLA_FUNCTION_FAILED; 3946 rval = QLA_FUNCTION_FAILED;
3983 3947
3984 if (rval != QLA_SUCCESS) { 3948 if (rval != QLA_SUCCESS) {
3985 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", 3949 ql_dbg(ql_dbg_mbx, vha, 0x1104,
3986 __func__, vha->host_no, rval, mb[0])); 3950 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
3987 } else { 3951 } else {
3988 DEBUG11(printk(KERN_INFO 3952 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
3989 "%s(%ld): done.\n", __func__, vha->host_no));
3990 } 3953 }
3991 3954
3992 return rval; 3955 return rval;
@@ -3999,12 +3962,11 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
3999 mbx_cmd_t *mcp = &mc; 3962 mbx_cmd_t *mcp = &mc;
4000 struct qla_hw_data *ha = vha->hw; 3963 struct qla_hw_data *ha = vha->hw;
4001 3964
3965 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
3966
4002 if (!IS_FWI2_CAPABLE(ha)) 3967 if (!IS_FWI2_CAPABLE(ha))
4003 return QLA_FUNCTION_FAILED; 3968 return QLA_FUNCTION_FAILED;
4004 3969
4005 DEBUG11(qla_printk(KERN_INFO, ha,
4006 "%s(%ld): entered.\n", __func__, vha->host_no));
4007
4008 mcp->mb[0] = MBC_DATA_RATE; 3970 mcp->mb[0] = MBC_DATA_RATE;
4009 mcp->mb[1] = 0; 3971 mcp->mb[1] = 0;
4010 mcp->out_mb = MBX_1|MBX_0; 3972 mcp->out_mb = MBX_1|MBX_0;
@@ -4013,11 +3975,10 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4013 mcp->flags = 0; 3975 mcp->flags = 0;
4014 rval = qla2x00_mailbox_command(vha, mcp); 3976 rval = qla2x00_mailbox_command(vha, mcp);
4015 if (rval != QLA_SUCCESS) { 3977 if (rval != QLA_SUCCESS) {
4016 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", 3978 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4017 __func__, vha->host_no, rval, mcp->mb[0])); 3979 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4018 } else { 3980 } else {
4019 DEBUG11(printk(KERN_INFO 3981 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
4020 "%s(%ld): done.\n", __func__, vha->host_no));
4021 if (mcp->mb[1] != 0x7) 3982 if (mcp->mb[1] != 0x7)
4022 ha->link_data_rate = mcp->mb[1]; 3983 ha->link_data_rate = mcp->mb[1];
4023 } 3984 }
@@ -4033,8 +3994,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4033 mbx_cmd_t *mcp = &mc; 3994 mbx_cmd_t *mcp = &mc;
4034 struct qla_hw_data *ha = vha->hw; 3995 struct qla_hw_data *ha = vha->hw;
4035 3996
4036 DEBUG11(printk(KERN_INFO 3997 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
4037 "%s(%ld): entered.\n", __func__, vha->host_no));
4038 3998
4039 if (!IS_QLA81XX(ha)) 3999 if (!IS_QLA81XX(ha))
4040 return QLA_FUNCTION_FAILED; 4000 return QLA_FUNCTION_FAILED;
@@ -4047,15 +4007,13 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4047 rval = qla2x00_mailbox_command(vha, mcp); 4007 rval = qla2x00_mailbox_command(vha, mcp);
4048 4008
4049 if (rval != QLA_SUCCESS) { 4009 if (rval != QLA_SUCCESS) {
4050 DEBUG2_3_11(printk(KERN_WARNING 4010 ql_dbg(ql_dbg_mbx, vha, 0x110a,
4051 "%s(%ld): failed=%x (%x).\n", __func__, 4011 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4052 vha->host_no, rval, mcp->mb[0]));
4053 } else { 4012 } else {
4054 /* Copy all bits to preserve original value */ 4013 /* Copy all bits to preserve original value */
4055 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 4014 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4056 4015
4057 DEBUG11(printk(KERN_INFO 4016 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
4058 "%s(%ld): done.\n", __func__, vha->host_no));
4059 } 4017 }
4060 return rval; 4018 return rval;
4061} 4019}
@@ -4067,8 +4025,7 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4067 mbx_cmd_t mc; 4025 mbx_cmd_t mc;
4068 mbx_cmd_t *mcp = &mc; 4026 mbx_cmd_t *mcp = &mc;
4069 4027
4070 DEBUG11(printk(KERN_INFO 4028 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
4071 "%s(%ld): entered.\n", __func__, vha->host_no));
4072 4029
4073 mcp->mb[0] = MBC_SET_PORT_CONFIG; 4030 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4074 /* Copy all bits to preserve original setting */ 4031 /* Copy all bits to preserve original setting */
@@ -4080,12 +4037,10 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4080 rval = qla2x00_mailbox_command(vha, mcp); 4037 rval = qla2x00_mailbox_command(vha, mcp);
4081 4038
4082 if (rval != QLA_SUCCESS) { 4039 if (rval != QLA_SUCCESS) {
4083 DEBUG2_3_11(printk(KERN_WARNING 4040 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4084 "%s(%ld): failed=%x (%x).\n", __func__, 4041 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4085 vha->host_no, rval, mcp->mb[0]));
4086 } else 4042 } else
4087 DEBUG11(printk(KERN_INFO 4043 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
4088 "%s(%ld): done.\n", __func__, vha->host_no));
4089 4044
4090 return rval; 4045 return rval;
4091} 4046}
@@ -4100,12 +4055,11 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4100 mbx_cmd_t *mcp = &mc; 4055 mbx_cmd_t *mcp = &mc;
4101 struct qla_hw_data *ha = vha->hw; 4056 struct qla_hw_data *ha = vha->hw;
4102 4057
4058 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
4059
4103 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 4060 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4104 return QLA_FUNCTION_FAILED; 4061 return QLA_FUNCTION_FAILED;
4105 4062
4106 DEBUG11(printk(KERN_INFO
4107 "%s(%ld): entered.\n", __func__, vha->host_no));
4108
4109 mcp->mb[0] = MBC_PORT_PARAMS; 4063 mcp->mb[0] = MBC_PORT_PARAMS;
4110 mcp->mb[1] = loop_id; 4064 mcp->mb[1] = loop_id;
4111 if (ha->flags.fcp_prio_enabled) 4065 if (ha->flags.fcp_prio_enabled)
@@ -4127,12 +4081,9 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4127 } 4081 }
4128 4082
4129 if (rval != QLA_SUCCESS) { 4083 if (rval != QLA_SUCCESS) {
4130 DEBUG2_3_11(printk(KERN_WARNING 4084 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4131 "%s(%ld): failed=%x.\n", __func__,
4132 vha->host_no, rval));
4133 } else { 4085 } else {
4134 DEBUG11(printk(KERN_INFO 4086 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
4135 "%s(%ld): done.\n", __func__, vha->host_no));
4136 } 4087 }
4137 4088
4138 return rval; 4089 return rval;
@@ -4145,13 +4096,12 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4145 uint8_t byte; 4096 uint8_t byte;
4146 struct qla_hw_data *ha = vha->hw; 4097 struct qla_hw_data *ha = vha->hw;
4147 4098
4148 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no)); 4099 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
4149 4100
4150 /* Integer part */ 4101 /* Integer part */
4151 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); 4102 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4152 if (rval != QLA_SUCCESS) { 4103 if (rval != QLA_SUCCESS) {
4153 DEBUG2_3_11(printk(KERN_WARNING 4104 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4154 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4155 ha->flags.thermal_supported = 0; 4105 ha->flags.thermal_supported = 0;
4156 goto fail; 4106 goto fail;
4157 } 4107 }
@@ -4160,14 +4110,13 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4160 /* Fraction part */ 4110 /* Fraction part */
4161 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0); 4111 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4162 if (rval != QLA_SUCCESS) { 4112 if (rval != QLA_SUCCESS) {
4163 DEBUG2_3_11(printk(KERN_WARNING 4113 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4164 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4165 ha->flags.thermal_supported = 0; 4114 ha->flags.thermal_supported = 0;
4166 goto fail; 4115 goto fail;
4167 } 4116 }
4168 *frac = (byte >> 6) * 25; 4117 *frac = (byte >> 6) * 25;
4169 4118
4170 DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no)); 4119 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
4171fail: 4120fail:
4172 return rval; 4121 return rval;
4173} 4122}
@@ -4180,12 +4129,11 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4180 mbx_cmd_t mc; 4129 mbx_cmd_t mc;
4181 mbx_cmd_t *mcp = &mc; 4130 mbx_cmd_t *mcp = &mc;
4182 4131
4132 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
4133
4183 if (!IS_FWI2_CAPABLE(ha)) 4134 if (!IS_FWI2_CAPABLE(ha))
4184 return QLA_FUNCTION_FAILED; 4135 return QLA_FUNCTION_FAILED;
4185 4136
4186 DEBUG11(qla_printk(KERN_INFO, ha,
4187 "%s(%ld): entered.\n", __func__, vha->host_no));
4188
4189 memset(mcp, 0, sizeof(mbx_cmd_t)); 4137 memset(mcp, 0, sizeof(mbx_cmd_t));
4190 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 4138 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4191 mcp->mb[1] = 1; 4139 mcp->mb[1] = 1;
@@ -4197,12 +4145,10 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4197 4145
4198 rval = qla2x00_mailbox_command(vha, mcp); 4146 rval = qla2x00_mailbox_command(vha, mcp);
4199 if (rval != QLA_SUCCESS) { 4147 if (rval != QLA_SUCCESS) {
4200 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 4148 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4201 "%s(%ld): failed=%x mb[0]=%x.\n", __func__, 4149 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4202 vha->host_no, rval, mcp->mb[0]));
4203 } else { 4150 } else {
4204 DEBUG11(qla_printk(KERN_INFO, ha, 4151 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
4205 "%s(%ld): done.\n", __func__, vha->host_no));
4206 } 4152 }
4207 4153
4208 return rval; 4154 return rval;
@@ -4216,12 +4162,11 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4216 mbx_cmd_t mc; 4162 mbx_cmd_t mc;
4217 mbx_cmd_t *mcp = &mc; 4163 mbx_cmd_t *mcp = &mc;
4218 4164
4165 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
4166
4219 if (!IS_QLA82XX(ha)) 4167 if (!IS_QLA82XX(ha))
4220 return QLA_FUNCTION_FAILED; 4168 return QLA_FUNCTION_FAILED;
4221 4169
4222 DEBUG11(qla_printk(KERN_INFO, ha,
4223 "%s(%ld): entered.\n", __func__, vha->host_no));
4224
4225 memset(mcp, 0, sizeof(mbx_cmd_t)); 4170 memset(mcp, 0, sizeof(mbx_cmd_t));
4226 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 4171 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4227 mcp->mb[1] = 0; 4172 mcp->mb[1] = 0;
@@ -4233,12 +4178,10 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4233 4178
4234 rval = qla2x00_mailbox_command(vha, mcp); 4179 rval = qla2x00_mailbox_command(vha, mcp);
4235 if (rval != QLA_SUCCESS) { 4180 if (rval != QLA_SUCCESS) {
4236 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 4181 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4237 "%s(%ld): failed=%x mb[0]=%x.\n", __func__, 4182 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4238 vha->host_no, rval, mcp->mb[0]));
4239 } else { 4183 } else {
4240 DEBUG11(qla_printk(KERN_INFO, ha, 4184 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
4241 "%s(%ld): done.\n", __func__, vha->host_no));
4242 } 4185 }
4243 4186
4244 return rval; 4187 return rval;
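The qla_mbx.c conversion above is uniform: each mailbox wrapper gains an "Entered %s" trace (hoisted above the IS_*() capability checks, as in the read_sfp/write_sfp hunks) and a "Failed"/"Done" pair after qla2x00_mailbox_command(), all under the ql_dbg_mbx mask with a unique message ID. A minimal sketch of the converted shape, assuming only the ql_dbg() signature and mbx_cmd_t fields shown in the hunks above; the 0x120x IDs and the opcode are placeholders, not values assigned by this patch:

static int
qla2x00_example_mbx(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	/* Entry trace replaces the old DEBUG11(printk(...)) line. */
	ql_dbg(ql_dbg_mbx, vha, 0x1200, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;	/* placeholder opcode */
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x1201,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	else
		ql_dbg(ql_dbg_mbx, vha, 0x1202, "Done %s.\n", __func__);

	return rval;
}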
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 5e343919acad..c706ed370000 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -36,8 +36,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
36 mutex_lock(&ha->vport_lock); 36 mutex_lock(&ha->vport_lock);
37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); 37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
38 if (vp_id > ha->max_npiv_vports) { 38 if (vp_id > ha->max_npiv_vports) {
39 DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n", 39 ql_dbg(ql_dbg_vport, vha, 0xa000,
40 vp_id, ha->max_npiv_vports)); 40 "vp_id %d is bigger than max-supported %d.\n",
41 vp_id, ha->max_npiv_vports);
41 mutex_unlock(&ha->vport_lock); 42 mutex_unlock(&ha->vport_lock);
42 return vp_id; 43 return vp_id;
43 } 44 }
@@ -131,9 +132,9 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
131 fc_port_t *fcport; 132 fc_port_t *fcport;
132 133
133 list_for_each_entry(fcport, &vha->vp_fcports, list) { 134 list_for_each_entry(fcport, &vha->vp_fcports, list) {
134 DEBUG15(printk("scsi(%ld): Marking port dead, " 135 ql_dbg(ql_dbg_vport, vha, 0xa001,
135 "loop_id=0x%04x :%x\n", 136 "Marking port dead, loop_id=0x%04x : %x.\n",
136 vha->host_no, fcport->loop_id, fcport->vp_idx)); 137 fcport->loop_id, fcport->vp_idx);
137 138
138 qla2x00_mark_device_lost(vha, fcport, 0, 0); 139 qla2x00_mark_device_lost(vha, fcport, 0, 0);
139 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 140 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -187,13 +188,13 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
187 goto enable_failed; 188 goto enable_failed;
188 } 189 }
189 190
190 DEBUG15(qla_printk(KERN_INFO, ha, 191 ql_dbg(ql_dbg_taskm, vha, 0x801a,
191 "Virtual port with id: %d - Enabled\n", vha->vp_idx)); 192 "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
192 return 0; 193 return 0;
193 194
194enable_failed: 195enable_failed:
195 DEBUG15(qla_printk(KERN_INFO, ha, 196 ql_dbg(ql_dbg_taskm, vha, 0x801b,
196 "Virtual port with id: %d - Disabled\n", vha->vp_idx)); 197 "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
197 return 1; 198 return 1;
198} 199}
199 200
@@ -205,12 +206,12 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
205 206
206 fc_vport = vha->fc_vport; 207 fc_vport = vha->fc_vport;
207 208
208 DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n", 209 ql_dbg(ql_dbg_vport, vha, 0xa002,
209 vha->host_no, __func__)); 210 "%s: change request #3.\n", __func__);
210 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); 211 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
211 if (ret != QLA_SUCCESS) { 212 if (ret != QLA_SUCCESS) {
212 DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable " 213 ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
213 "receiving of RSCN requests: 0x%x\n", ret)); 214 "receiving of RSCN requests: 0x%x.\n", ret);
214 return; 215 return;
215 } else { 216 } else {
216 /* Corresponds to SCR enabled */ 217 /* Corresponds to SCR enabled */
@@ -248,9 +249,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
248 case MBA_CHG_IN_CONNECTION: 249 case MBA_CHG_IN_CONNECTION:
249 case MBA_PORT_UPDATE: 250 case MBA_PORT_UPDATE:
250 case MBA_RSCN_UPDATE: 251 case MBA_RSCN_UPDATE:
251 DEBUG15(printk("scsi(%ld)%s: Async_event for" 252 ql_dbg(ql_dbg_async, vha, 0x5024,
252 " VP[%d], mb = 0x%x, vha=%p\n", 253 "Async_event for VP[%d], mb=0x%x vha=%p.\n",
253 vha->host_no, __func__, i, *mb, vha)); 254 i, *mb, vha);
254 qla2x00_async_event(vha, rsp, mb); 255 qla2x00_async_event(vha, rsp, mb);
255 break; 256 break;
256 } 257 }
@@ -286,37 +287,49 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
286 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 287 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
287 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 288 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
288 289
289 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n", 290 ql_dbg(ql_dbg_taskm, vha, 0x801d,
290 vha->host_no, vha->vp_idx)); 291 "Scheduling enable of Vport %d.\n", vha->vp_idx);
291 return qla24xx_enable_vp(vha); 292 return qla24xx_enable_vp(vha);
292} 293}
293 294
294static int 295static int
295qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 296qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
296{ 297{
298 ql_dbg(ql_dbg_dpc, vha, 0x4012,
299 "Entering %s.\n", __func__);
300 ql_dbg(ql_dbg_dpc, vha, 0x4013,
301 "vp_flags: 0x%lx.\n", vha->vp_flags);
302
297 qla2x00_do_work(vha); 303 qla2x00_do_work(vha);
298 304
299 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 305 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
300 /* VP acquired. complete port configuration */ 306 /* VP acquired. complete port configuration */
307 ql_dbg(ql_dbg_dpc, vha, 0x4014,
308 "Configure VP scheduled.\n");
301 qla24xx_configure_vp(vha); 309 qla24xx_configure_vp(vha);
310 ql_dbg(ql_dbg_dpc, vha, 0x4015,
311 "Configure VP end.\n");
302 return 0; 312 return 0;
303 } 313 }
304 314
305 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { 315 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
316 ql_dbg(ql_dbg_dpc, vha, 0x4016,
317 "FCPort update scheduled.\n");
306 qla2x00_update_fcports(vha); 318 qla2x00_update_fcports(vha);
307 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); 319 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
320 ql_dbg(ql_dbg_dpc, vha, 0x4017,
321 "FCPort update end.\n");
308 } 322 }
309 323
310 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && 324 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
311 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && 325 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
312 atomic_read(&vha->loop_state) != LOOP_DOWN) { 326 atomic_read(&vha->loop_state) != LOOP_DOWN) {
313 327
314 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 328 ql_dbg(ql_dbg_dpc, vha, 0x4018,
315 vha->host_no)); 329 "Relogin needed scheduled.\n");
316 qla2x00_relogin(vha); 330 qla2x00_relogin(vha);
317 331 ql_dbg(ql_dbg_dpc, vha, 0x4019,
318 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 332 "Relogin needed end.\n");
319 vha->host_no));
320 } 333 }
321 334
322 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && 335 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -326,11 +339,17 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
326 339
327 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 340 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
328 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 341 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
342 ql_dbg(ql_dbg_dpc, vha, 0x401a,
343 "Loop resync scheduled.\n");
329 qla2x00_loop_resync(vha); 344 qla2x00_loop_resync(vha);
330 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 345 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
346 ql_dbg(ql_dbg_dpc, vha, 0x401b,
347 "Loop resync end.\n");
331 } 348 }
332 } 349 }
333 350
351 ql_dbg(ql_dbg_dpc, vha, 0x401c,
352 "Exiting %s.\n", __func__);
334 return 0; 353 return 0;
335} 354}
336 355
@@ -396,9 +415,10 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
396 415
397 /* Check up max-npiv-supports */ 416 /* Check up max-npiv-supports */
398 if (ha->num_vhosts > ha->max_npiv_vports) { 417 if (ha->num_vhosts > ha->max_npiv_vports) {
399 DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than " 418 ql_dbg(ql_dbg_vport, vha, 0xa004,
400 "max_npv_vports %ud.\n", base_vha->host_no, 419 "num_vhosts %ud is bigger "
401 ha->num_vhosts, ha->max_npiv_vports)); 420 "than max_npiv_vports %ud.\n",
421 ha->num_vhosts, ha->max_npiv_vports);
402 return VPCERR_UNSUPPORTED; 422 return VPCERR_UNSUPPORTED;
403 } 423 }
404 return 0; 424 return 0;
@@ -415,7 +435,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
415 435
416 vha = qla2x00_create_host(sht, ha); 436 vha = qla2x00_create_host(sht, ha);
417 if (!vha) { 437 if (!vha) {
418 DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n")); 438 ql_log(ql_log_warn, vha, 0xa005,
439 "scsi_host_alloc() failed for vport.\n");
419 return(NULL); 440 return(NULL);
420 } 441 }
421 442
@@ -429,8 +450,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
429 vha->device_flags = 0; 450 vha->device_flags = 0;
430 vha->vp_idx = qla24xx_allocate_vp_id(vha); 451 vha->vp_idx = qla24xx_allocate_vp_id(vha);
431 if (vha->vp_idx > ha->max_npiv_vports) { 452 if (vha->vp_idx > ha->max_npiv_vports) {
432 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", 453 ql_dbg(ql_dbg_vport, vha, 0xa006,
433 vha->host_no)); 454 "Couldn't allocate vp_id.\n");
434 goto create_vhost_failed; 455 goto create_vhost_failed;
435 } 456 }
436 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 457 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
@@ -461,8 +482,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
461 host->max_id = MAX_TARGETS_2200; 482 host->max_id = MAX_TARGETS_2200;
462 host->transportt = qla2xxx_transport_vport_template; 483 host->transportt = qla2xxx_transport_vport_template;
463 484
464 DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n", 485 ql_dbg(ql_dbg_vport, vha, 0xa007,
465 vha->host_no, vha)); 486 "Detect vport hba %ld at address = %p.\n",
487 vha->host_no, vha);
466 488
467 vha->flags.init_done = 1; 489 vha->flags.init_done = 1;
468 490
@@ -567,9 +589,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
567 if (req) { 589 if (req) {
568 ret = qla25xx_delete_req_que(vha, req); 590 ret = qla25xx_delete_req_que(vha, req);
569 if (ret != QLA_SUCCESS) { 591 if (ret != QLA_SUCCESS) {
570 qla_printk(KERN_WARNING, ha, 592 ql_log(ql_log_warn, vha, 0x00ea,
571 "Couldn't delete req que %d\n", 593 "Couldn't delete req que %d.\n",
572 req->id); 594 req->id);
573 return ret; 595 return ret;
574 } 596 }
575 } 597 }
@@ -581,9 +603,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
581 if (rsp) { 603 if (rsp) {
582 ret = qla25xx_delete_rsp_que(vha, rsp); 604 ret = qla25xx_delete_rsp_que(vha, rsp);
583 if (ret != QLA_SUCCESS) { 605 if (ret != QLA_SUCCESS) {
584 qla_printk(KERN_WARNING, ha, 606 ql_log(ql_log_warn, vha, 0x00eb,
585 "Couldn't delete rsp que %d\n", 607 "Couldn't delete rsp que %d.\n",
586 rsp->id); 608 rsp->id);
587 return ret; 609 return ret;
588 } 610 }
589 } 611 }
@@ -604,8 +626,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 626
605 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 627 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
606 if (req == NULL) { 628 if (req == NULL) {
607 qla_printk(KERN_WARNING, ha, "could not allocate memory" 629 ql_log(ql_log_fatal, base_vha, 0x00d9,
608 "for request que\n"); 630 "Failed to allocate memory for request queue.\n");
609 goto failed; 631 goto failed;
610 } 632 }
611 633
@@ -614,8 +636,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
614 (req->length + 1) * sizeof(request_t), 636 (req->length + 1) * sizeof(request_t),

615 &req->dma, GFP_KERNEL); 637 &req->dma, GFP_KERNEL);
616 if (req->ring == NULL) { 638 if (req->ring == NULL) {
617 qla_printk(KERN_WARNING, ha, 639 ql_log(ql_log_fatal, base_vha, 0x00da,
618 "Memory Allocation failed - request_ring\n"); 640 "Failed to allocte memory for request_ring.\n");
619 goto que_failed; 641 goto que_failed;
620 } 642 }
621 643
@@ -623,8 +645,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
623 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); 645 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
624 if (que_id >= ha->max_req_queues) { 646 if (que_id >= ha->max_req_queues) {
625 mutex_unlock(&ha->vport_lock); 647 mutex_unlock(&ha->vport_lock);
626 qla_printk(KERN_INFO, ha, "No resources to create " 648 ql_log(ql_log_warn, base_vha, 0x00db,
627 "additional request queue\n"); 649 "No resources to create additional request queue.\n");
628 goto que_failed; 650 goto que_failed;
629 } 651 }
630 set_bit(que_id, ha->req_qid_map); 652 set_bit(que_id, ha->req_qid_map);
@@ -633,6 +655,12 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
633 req->vp_idx = vp_idx; 655 req->vp_idx = vp_idx;
634 req->qos = qos; 656 req->qos = qos;
635 657
658 ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
659 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
660 que_id, req->rid, req->vp_idx, req->qos);
661 ql_dbg(ql_dbg_init, base_vha, 0x00dc,
662 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
663 que_id, req->rid, req->vp_idx, req->qos);
636 if (rsp_que < 0) 664 if (rsp_que < 0)
637 req->rsp = NULL; 665 req->rsp = NULL;
638 else 666 else
@@ -645,6 +673,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
645 options |= BIT_5; 673 options |= BIT_5;
646 req->options = options; 674 req->options = options;
647 675
676 ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
677 "options=0x%x.\n", req->options);
678 ql_dbg(ql_dbg_init, base_vha, 0x00dd,
679 "options=0x%x.\n", req->options);
648 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 680 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
649 req->outstanding_cmds[cnt] = NULL; 681 req->outstanding_cmds[cnt] = NULL;
650 req->current_outstanding_cmd = 1; 682 req->current_outstanding_cmd = 1;
@@ -656,10 +688,21 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
656 reg = ISP_QUE_REG(ha, que_id); 688 reg = ISP_QUE_REG(ha, que_id);
657 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 689 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
658 mutex_unlock(&ha->vport_lock); 690 mutex_unlock(&ha->vport_lock);
691 ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
692 "ring_ptr=%p ring_index=%d, "
693 "cnt=%d id=%d max_q_depth=%d.\n",
694 req->ring_ptr, req->ring_index,
695 req->cnt, req->id, req->max_q_depth);
696 ql_dbg(ql_dbg_init, base_vha, 0x00de,
697 "ring_ptr=%p ring_index=%d, "
698 "cnt=%d id=%d max_q_depth=%d.\n",
699 req->ring_ptr, req->ring_index, req->cnt,
700 req->id, req->max_q_depth);
659 701
660 ret = qla25xx_init_req_que(base_vha, req); 702 ret = qla25xx_init_req_que(base_vha, req);
661 if (ret != QLA_SUCCESS) { 703 if (ret != QLA_SUCCESS) {
662 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); 704 ql_log(ql_log_fatal, base_vha, 0x00df,
705 "%s failed.\n", __func__);
663 mutex_lock(&ha->vport_lock); 706 mutex_lock(&ha->vport_lock);
664 clear_bit(que_id, ha->req_qid_map); 707 clear_bit(que_id, ha->req_qid_map);
665 mutex_unlock(&ha->vport_lock); 708 mutex_unlock(&ha->vport_lock);
@@ -700,8 +743,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
700 743
701 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 744 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
702 if (rsp == NULL) { 745 if (rsp == NULL) {
703 qla_printk(KERN_WARNING, ha, "could not allocate memory for" 746 ql_log(ql_log_warn, base_vha, 0x0066,
704 " response que\n"); 747 "Failed to allocate memory for response queue.\n");
705 goto failed; 748 goto failed;
706 } 749 }
707 750
@@ -710,8 +753,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
710 (rsp->length + 1) * sizeof(response_t), 753 (rsp->length + 1) * sizeof(response_t),
711 &rsp->dma, GFP_KERNEL); 754 &rsp->dma, GFP_KERNEL);
712 if (rsp->ring == NULL) { 755 if (rsp->ring == NULL) {
713 qla_printk(KERN_WARNING, ha, 756 ql_log(ql_log_warn, base_vha, 0x00e1,
714 "Memory Allocation failed - response_ring\n"); 757 "Failed to allocate memory for response ring.\n");
715 goto que_failed; 758 goto que_failed;
716 } 759 }
717 760
@@ -719,8 +762,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
719 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); 762 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
720 if (que_id >= ha->max_rsp_queues) { 763 if (que_id >= ha->max_rsp_queues) {
721 mutex_unlock(&ha->vport_lock); 764 mutex_unlock(&ha->vport_lock);
722 qla_printk(KERN_INFO, ha, "No resources to create " 765 ql_log(ql_log_warn, base_vha, 0x00e2,
723 "additional response queue\n"); 766 "No resources to create additional request queue.\n");
724 goto que_failed; 767 goto que_failed;
725 } 768 }
726 set_bit(que_id, ha->rsp_qid_map); 769 set_bit(que_id, ha->rsp_qid_map);
@@ -728,12 +771,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 if (ha->flags.msix_enabled) 771 if (ha->flags.msix_enabled)
729 rsp->msix = &ha->msix_entries[que_id + 1]; 772 rsp->msix = &ha->msix_entries[que_id + 1];
730 else 773 else
731 qla_printk(KERN_WARNING, ha, "msix not enabled\n"); 774 ql_log(ql_log_warn, base_vha, 0x00e3,
775 "MSIX not enalbled.\n");
732 776
733 ha->rsp_q_map[que_id] = rsp; 777 ha->rsp_q_map[que_id] = rsp;
734 rsp->rid = rid; 778 rsp->rid = rid;
735 rsp->vp_idx = vp_idx; 779 rsp->vp_idx = vp_idx;
736 rsp->hw = ha; 780 rsp->hw = ha;
781 ql_dbg(ql_dbg_init, base_vha, 0x00e4,
782 "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
783 que_id, rsp->rid, rsp->vp_idx, rsp->hw);
737 /* Use alternate PCI bus number */ 784 /* Use alternate PCI bus number */
738 if (MSB(rsp->rid)) 785 if (MSB(rsp->rid))
739 options |= BIT_4; 786 options |= BIT_4;
@@ -750,6 +797,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
750 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 797 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
751 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; 798 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
752 mutex_unlock(&ha->vport_lock); 799 mutex_unlock(&ha->vport_lock);
800 ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
801 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
802 rsp->options, rsp->id, rsp->rsp_q_in,
803 rsp->rsp_q_out);
804 ql_dbg(ql_dbg_init, base_vha, 0x00e5,
805 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
806 rsp->options, rsp->id, rsp->rsp_q_in,
807 rsp->rsp_q_out);
753 808
754 ret = qla25xx_request_irq(rsp); 809 ret = qla25xx_request_irq(rsp);
755 if (ret) 810 if (ret)
@@ -757,7 +812,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
757 812
758 ret = qla25xx_init_rsp_que(base_vha, rsp); 813 ret = qla25xx_init_rsp_que(base_vha, rsp);
759 if (ret != QLA_SUCCESS) { 814 if (ret != QLA_SUCCESS) {
760 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); 815 ql_log(ql_log_fatal, base_vha, 0x00e7,
816 "%s failed.\n", __func__);
761 mutex_lock(&ha->vport_lock); 817 mutex_lock(&ha->vport_lock);
762 clear_bit(que_id, ha->rsp_qid_map); 818 clear_bit(que_id, ha->rsp_qid_map);
763 mutex_unlock(&ha->vport_lock); 819 mutex_unlock(&ha->vport_lock);
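The qla_mid.c hunks draw a line between messages that must always reach the log and ones that are only for tracing: queue allocation and init failures now go through ql_log() with a severity (ql_log_warn or ql_log_fatal against the base host), while the per-queue parameter dumps use ql_dbg() under the ql_dbg_multiq and ql_dbg_init masks. A rough sketch of that split, assuming the signatures used above; the message IDs are placeholders:

static struct rsp_que *example_alloc_rsp_que(scsi_qla_host_t *base_vha)
{
	struct rsp_que *rsp;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		/* Unconditional: ql_log() always prints at its severity. */
		ql_log(ql_log_warn, base_vha, 0x0f00,
		    "Failed to allocate memory for response queue.\n");
		return NULL;
	}

	/* Conditional: ql_dbg() prints only when its mask bit is enabled. */
	ql_dbg(ql_dbg_multiq, base_vha, 0x0f01,
	    "queue_id=%d vp_idx=%d.\n", rsp->id, rsp->vp_idx);

	return rsp;
}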
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index e1138bcc834c..5cbf33a50b14 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -348,6 +348,7 @@ static void
348qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) 348qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
349{ 349{
350 u32 win_read; 350 u32 win_read;
351 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
351 352
352 ha->crb_win = CRB_HI(*off); 353 ha->crb_win = CRB_HI(*off);
353 writel(ha->crb_win, 354 writel(ha->crb_win,
@@ -358,9 +359,10 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
358 */ 359 */
359 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase)); 360 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
360 if (win_read != ha->crb_win) { 361 if (win_read != ha->crb_win) {
361 DEBUG2(qla_printk(KERN_INFO, ha, 362 ql_dbg(ql_dbg_p3p, vha, 0xb000,
362 "%s: Written crbwin (0x%x) != Read crbwin (0x%x), " 363 "%s: Written crbwin (0x%x) "
363 "off=0x%lx\n", __func__, ha->crb_win, win_read, *off)); 364 "!= Read crbwin (0x%x), off=0x%lx.\n",
365 ha->crb_win, win_read, *off);
364 } 366 }
365 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; 367 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
366} 368}
@@ -368,6 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
368static inline unsigned long 370static inline unsigned long
369qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) 371qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
370{ 372{
373 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
371 /* See if we are currently pointing to the region we want to use next */ 374 /* See if we are currently pointing to the region we want to use next */
372 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) { 375 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
373 /* No need to change window. PCIX and PCIEregs are in both 376 /* No need to change window. PCIX and PCIEregs are in both
@@ -398,9 +401,10 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
398 return off; 401 return off;
399 } 402 }
400 /* strange address given */ 403 /* strange address given */
401 qla_printk(KERN_WARNING, ha, 404 ql_dbg(ql_dbg_p3p, vha, 0xb001,
402 "%s: Warning: unm_nic_pci_set_crbwindow called with" 405 "%x: Warning: unm_nic_pci_set_crbwindow "
403 " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off); 406 "called with an unknown address(%llx).\n",
407 QLA2XXX_DRIVER_NAME, off);
404 return off; 408 return off;
405} 409}
406 410
@@ -563,6 +567,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
563{ 567{
564 int window; 568 int window;
565 u32 win_read; 569 u32 win_read;
570 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
566 571
567 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 572 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
568 QLA82XX_ADDR_DDR_NET_MAX)) { 573 QLA82XX_ADDR_DDR_NET_MAX)) {
@@ -574,8 +579,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
574 win_read = qla82xx_rd_32(ha, 579 win_read = qla82xx_rd_32(ha,
575 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); 580 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
576 if ((win_read << 17) != window) { 581 if ((win_read << 17) != window) {
577 qla_printk(KERN_WARNING, ha, 582 ql_dbg(ql_dbg_p3p, vha, 0xb003,
578 "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n", 583 "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
579 __func__, window, win_read); 584 __func__, window, win_read);
580 } 585 }
581 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; 586 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
@@ -583,7 +588,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
583 QLA82XX_ADDR_OCM0_MAX)) { 588 QLA82XX_ADDR_OCM0_MAX)) {
584 unsigned int temp1; 589 unsigned int temp1;
585 if ((addr & 0x00ff800) == 0xff800) { 590 if ((addr & 0x00ff800) == 0xff800) {
586 qla_printk(KERN_WARNING, ha, 591 ql_log(ql_log_warn, vha, 0xb004,
587 "%s: QM access not handled.\n", __func__); 592 "%s: QM access not handled.\n", __func__);
588 addr = -1UL; 593 addr = -1UL;
589 } 594 }
@@ -596,8 +601,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
596 temp1 = ((window & 0x1FF) << 7) | 601 temp1 = ((window & 0x1FF) << 7) |
597 ((window & 0x0FFFE0000) >> 17); 602 ((window & 0x0FFFE0000) >> 17);
598 if (win_read != temp1) { 603 if (win_read != temp1) {
599 qla_printk(KERN_WARNING, ha, 604 ql_log(ql_log_warn, vha, 0xb005,
600 "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n", 605 "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
601 __func__, temp1, win_read); 606 __func__, temp1, win_read);
602 } 607 }
603 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; 608 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
@@ -612,8 +617,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
612 win_read = qla82xx_rd_32(ha, 617 win_read = qla82xx_rd_32(ha,
613 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); 618 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
614 if (win_read != window) { 619 if (win_read != window) {
615 qla_printk(KERN_WARNING, ha, 620 ql_log(ql_log_warn, vha, 0xb006,
616 "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n", 621 "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
617 __func__, window, win_read); 622 __func__, window, win_read);
618 } 623 }
619 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; 624 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
@@ -624,9 +629,9 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
624 */ 629 */
625 if ((qla82xx_pci_set_window_warning_count++ < 8) || 630 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
626 (qla82xx_pci_set_window_warning_count%64 == 0)) { 631 (qla82xx_pci_set_window_warning_count%64 == 0)) {
627 qla_printk(KERN_WARNING, ha, 632 ql_log(ql_log_warn, vha, 0xb007,
628 "%s: Warning:%s Unknown address range!\n", __func__, 633 "%s: Warning:%s Unknown address range!.\n",
629 QLA2XXX_DRIVER_NAME); 634 __func__, QLA2XXX_DRIVER_NAME);
630 } 635 }
631 addr = -1UL; 636 addr = -1UL;
632 } 637 }
@@ -671,6 +676,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
671 uint8_t *mem_ptr = NULL; 676 uint8_t *mem_ptr = NULL;
672 unsigned long mem_base; 677 unsigned long mem_base;
673 unsigned long mem_page; 678 unsigned long mem_page;
679 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
674 680
675 write_lock_irqsave(&ha->hw_lock, flags); 681 write_lock_irqsave(&ha->hw_lock, flags);
676 682
@@ -682,9 +688,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
682 if ((start == -1UL) || 688 if ((start == -1UL) ||
683 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 689 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
684 write_unlock_irqrestore(&ha->hw_lock, flags); 690 write_unlock_irqrestore(&ha->hw_lock, flags);
685 qla_printk(KERN_ERR, ha, 691 ql_log(ql_log_fatal, vha, 0xb008,
686 "%s out of bound pci memory access. " 692 "%s out of bound pci memory "
687 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 693 "access, offset is 0x%llx.\n",
694 QLA2XXX_DRIVER_NAME, off);
688 return -1; 695 return -1;
689 } 696 }
690 697
@@ -741,6 +748,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
741 uint8_t *mem_ptr = NULL; 748 uint8_t *mem_ptr = NULL;
742 unsigned long mem_base; 749 unsigned long mem_base;
743 unsigned long mem_page; 750 unsigned long mem_page;
751 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
744 752
745 write_lock_irqsave(&ha->hw_lock, flags); 753 write_lock_irqsave(&ha->hw_lock, flags);
746 754
@@ -752,9 +760,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
752 if ((start == -1UL) || 760 if ((start == -1UL) ||
753 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 761 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
754 write_unlock_irqrestore(&ha->hw_lock, flags); 762 write_unlock_irqrestore(&ha->hw_lock, flags);
755 qla_printk(KERN_ERR, ha, 763 ql_log(ql_log_fatal, vha, 0xb009,
756 "%s out of bound pci memory access. " 764 "%s out of bount memory "
757 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 765 "access, offset is 0x%llx.\n",
766 QLA2XXX_DRIVER_NAME, off);
758 return -1; 767 return -1;
759 } 768 }
760 769
@@ -855,15 +864,16 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha)
855{ 864{
856 long timeout = 0; 865 long timeout = 0;
857 long done = 0 ; 866 long done = 0 ;
867 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
858 868
859 while (done == 0) { 869 while (done == 0) {
860 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 870 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
861 done &= 4; 871 done &= 4;
862 timeout++; 872 timeout++;
863 if (timeout >= rom_max_timeout) { 873 if (timeout >= rom_max_timeout) {
864 DEBUG(qla_printk(KERN_INFO, ha, 874 ql_dbg(ql_dbg_p3p, vha, 0xb00a,
865 "%s: Timeout reached waiting for rom busy", 875 "%s: Timeout reached waiting for rom busy.\n",
866 QLA2XXX_DRIVER_NAME)); 876 QLA2XXX_DRIVER_NAME);
867 return -1; 877 return -1;
868 } 878 }
869 } 879 }
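qla82xx_wait_rom_busy() above and qla82xx_wait_rom_done() in the next hunk share one polling idiom: read QLA82XX_ROMUSB_GLB_STATUS until the bit of interest is set (0x4 for busy cleared, 0x2 for done) or rom_max_timeout iterations elapse, logging and returning -1 on timeout. A hedged sketch of that shared loop, with an illustrative helper name and message id:

static int example_wait_rom_status(struct qla_hw_data *ha, uint32_t mask)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	long timeout = 0;

	/* Poll the ROMUSB global status register for the requested bit,
	 * bounded by rom_max_timeout iterations. */
	while (!(qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS) & mask)) {
		if (++timeout >= rom_max_timeout) {
			ql_dbg(ql_dbg_p3p, vha, 0xb0fe,	/* illustrative id */
			    "%s: Timeout waiting for ROM status 0x%x.\n",
			    __func__, mask);
			return -1;
		}
	}
	return 0;
}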
@@ -875,15 +885,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
875{ 885{
876 long timeout = 0; 886 long timeout = 0;
877 long done = 0 ; 887 long done = 0 ;
888 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
878 889
879 while (done == 0) { 890 while (done == 0) {
880 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 891 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
881 done &= 2; 892 done &= 2;
882 timeout++; 893 timeout++;
883 if (timeout >= rom_max_timeout) { 894 if (timeout >= rom_max_timeout) {
884 DEBUG(qla_printk(KERN_INFO, ha, 895 ql_dbg(ql_dbg_p3p, vha, 0xb00b,
885 "%s: Timeout reached waiting for rom done", 896 "%s: Timeout reached waiting for rom done.\n",
886 QLA2XXX_DRIVER_NAME)); 897 QLA2XXX_DRIVER_NAME);
887 return -1; 898 return -1;
888 } 899 }
889 } 900 }
@@ -893,15 +904,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
893static int 904static int
894qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 905qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
895{ 906{
907 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
908
896 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); 909 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
897 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 910 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
898 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); 911 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
899 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); 912 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
900 qla82xx_wait_rom_busy(ha); 913 qla82xx_wait_rom_busy(ha);
901 if (qla82xx_wait_rom_done(ha)) { 914 if (qla82xx_wait_rom_done(ha)) {
902 qla_printk(KERN_WARNING, ha, 915 ql_log(ql_log_fatal, vha, 0x00ba,
903 "%s: Error waiting for rom done\n", 916 "Error waiting for rom done.\n");
904 QLA2XXX_DRIVER_NAME);
905 return -1; 917 return -1;
906 } 918 }
907 /* Reset abyte_cnt and dummy_byte_cnt */ 919 /* Reset abyte_cnt and dummy_byte_cnt */
@@ -917,6 +929,7 @@ static int
917qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 929qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
918{ 930{
919 int ret, loops = 0; 931 int ret, loops = 0;
932 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
920 933
921 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 934 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
922 udelay(100); 935 udelay(100);
@@ -924,9 +937,8 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
924 loops++; 937 loops++;
925 } 938 }
926 if (loops >= 50000) { 939 if (loops >= 50000) {
927 qla_printk(KERN_INFO, ha, 940 ql_log(ql_log_fatal, vha, 0x00b9,
928 "%s: qla82xx_rom_lock failed\n", 941 "Failed to aquire SEM2 lock.\n");
929 QLA2XXX_DRIVER_NAME);
930 return -1; 942 return -1;
931 } 943 }
932 ret = qla82xx_do_rom_fast_read(ha, addr, valp); 944 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -937,11 +949,12 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
937static int 949static int
938qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) 950qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
939{ 951{
952 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
940 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); 953 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
941 qla82xx_wait_rom_busy(ha); 954 qla82xx_wait_rom_busy(ha);
942 if (qla82xx_wait_rom_done(ha)) { 955 if (qla82xx_wait_rom_done(ha)) {
943 qla_printk(KERN_WARNING, ha, 956 ql_log(ql_log_warn, vha, 0xb00c,
944 "Error waiting for rom done\n"); 957 "Error waiting for rom done.\n");
945 return -1; 958 return -1;
946 } 959 }
947 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); 960 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
@@ -955,6 +968,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
955 uint32_t done = 1 ; 968 uint32_t done = 1 ;
956 uint32_t val; 969 uint32_t val;
957 int ret = 0; 970 int ret = 0;
971 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
958 972
959 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); 973 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
960 while ((done != 0) && (ret == 0)) { 974 while ((done != 0) && (ret == 0)) {
@@ -964,8 +978,8 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
964 udelay(10); 978 udelay(10);
965 cond_resched(); 979 cond_resched();
966 if (timeout >= 50000) { 980 if (timeout >= 50000) {
967 qla_printk(KERN_WARNING, ha, 981 ql_log(ql_log_warn, vha, 0xb00d,
968 "Timeout reached waiting for write finish"); 982 "Timeout reached waiting for write finish.\n");
969 return -1; 983 return -1;
970 } 984 }
971 } 985 }
@@ -992,13 +1006,14 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
992static int 1006static int
993qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) 1007qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
994{ 1008{
1009 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
995 if (qla82xx_flash_set_write_enable(ha)) 1010 if (qla82xx_flash_set_write_enable(ha))
996 return -1; 1011 return -1;
997 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); 1012 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
998 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); 1013 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
999 if (qla82xx_wait_rom_done(ha)) { 1014 if (qla82xx_wait_rom_done(ha)) {
1000 qla_printk(KERN_WARNING, ha, 1015 ql_log(ql_log_warn, vha, 0xb00e,
1001 "Error waiting for rom done\n"); 1016 "Error waiting for rom done.\n");
1002 return -1; 1017 return -1;
1003 } 1018 }
1004 return qla82xx_flash_wait_write_finish(ha); 1019 return qla82xx_flash_wait_write_finish(ha);
@@ -1007,10 +1022,11 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
1007static int 1022static int
1008qla82xx_write_disable_flash(struct qla_hw_data *ha) 1023qla82xx_write_disable_flash(struct qla_hw_data *ha)
1009{ 1024{
1025 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1010 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); 1026 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1011 if (qla82xx_wait_rom_done(ha)) { 1027 if (qla82xx_wait_rom_done(ha)) {
1012 qla_printk(KERN_WARNING, ha, 1028 ql_log(ql_log_warn, vha, 0xb00f,
1013 "Error waiting for rom done\n"); 1029 "Error waiting for rom done.\n");
1014 return -1; 1030 return -1;
1015 } 1031 }
1016 return 0; 1032 return 0;
@@ -1020,13 +1036,16 @@ static int
1020ql82xx_rom_lock_d(struct qla_hw_data *ha) 1036ql82xx_rom_lock_d(struct qla_hw_data *ha)
1021{ 1037{
1022 int loops = 0; 1038 int loops = 0;
1039 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040
1023 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 1041 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1024 udelay(100); 1042 udelay(100);
1025 cond_resched(); 1043 cond_resched();
1026 loops++; 1044 loops++;
1027 } 1045 }
1028 if (loops >= 50000) { 1046 if (loops >= 50000) {
1029 qla_printk(KERN_WARNING, ha, "ROM lock failed\n"); 1047 ql_log(ql_log_warn, vha, 0xb010,
1048 "ROM lock failed.\n");
1030 return -1; 1049 return -1;
1031 } 1050 }
1032 return 0;; 1051 return 0;;
@@ -1037,10 +1056,12 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1037 uint32_t data) 1056 uint32_t data)
1038{ 1057{
1039 int ret = 0; 1058 int ret = 0;
1059 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040 1060
1041 ret = ql82xx_rom_lock_d(ha); 1061 ret = ql82xx_rom_lock_d(ha);
1042 if (ret < 0) { 1062 if (ret < 0) {
1043 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 1063 ql_log(ql_log_warn, vha, 0xb011,
1064 "ROM lock failed.\n");
1044 return ret; 1065 return ret;
1045 } 1066 }
1046 1067
@@ -1053,8 +1074,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1053 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); 1074 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1054 qla82xx_wait_rom_busy(ha); 1075 qla82xx_wait_rom_busy(ha);
1055 if (qla82xx_wait_rom_done(ha)) { 1076 if (qla82xx_wait_rom_done(ha)) {
1056 qla_printk(KERN_WARNING, ha, 1077 ql_log(ql_log_warn, vha, 0xb012,
1057 "Error waiting for rom done\n"); 1078 "Error waiting for rom done.\n");
1058 ret = -1; 1079 ret = -1;
1059 goto done_write; 1080 goto done_write;
1060 } 1081 }
@@ -1159,8 +1180,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1159 */ 1180 */
1160 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || 1181 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1161 qla82xx_rom_fast_read(ha, 4, &n) != 0) { 1182 qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1162 qla_printk(KERN_WARNING, ha, 1183 ql_log(ql_log_fatal, vha, 0x006e,
1163 "[ERROR] Reading crb_init area: n: %08x\n", n); 1184 "Error Reading crb_init area: n: %08x.\n", n);
1164 return -1; 1185 return -1;
1165 } 1186 }
1166 1187
@@ -1172,20 +1193,18 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1172 1193
1173 /* number of addr/value pair should not exceed 1024 enteries */ 1194 /* number of addr/value pair should not exceed 1024 enteries */
1174 if (n >= 1024) { 1195 if (n >= 1024) {
1175 qla_printk(KERN_WARNING, ha, 1196 ql_log(ql_log_fatal, vha, 0x0071,
1176 "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n", 1197 "Card flash not initialized:n=0x%x.\n", n);
1177 QLA2XXX_DRIVER_NAME, __func__, n);
1178 return -1; 1198 return -1;
1179 } 1199 }
1180 1200
1181 qla_printk(KERN_INFO, ha, 1201 ql_log(ql_log_info, vha, 0x0072,
1182 "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n); 1202 "%d CRB init values found in ROM.\n", n);
1183 1203
1184 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL); 1204 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1185 if (buf == NULL) { 1205 if (buf == NULL) {
1186 qla_printk(KERN_WARNING, ha, 1206 ql_log(ql_log_fatal, vha, 0x010c,
1187 "%s: [ERROR] Unable to malloc memory.\n", 1207 "Unable to allocate memory.\n");
1188 QLA2XXX_DRIVER_NAME);
1189 return -1; 1208 return -1;
1190 } 1209 }
1191 1210
@@ -1236,9 +1255,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1236 continue; 1255 continue;
1237 1256
1238 if (off == ADDR_ERROR) { 1257 if (off == ADDR_ERROR) {
1239 qla_printk(KERN_WARNING, ha, 1258 ql_log(ql_log_fatal, vha, 0x0116,
1240 "%s: [ERROR] Unknown addr: 0x%08lx\n", 1259 "Unknow addr: 0x%08lx.\n", buf[i].addr);
1241 QLA2XXX_DRIVER_NAME, buf[i].addr);
1242 continue; 1260 continue;
1243 } 1261 }
1244 1262
@@ -1370,7 +1388,7 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1370 if (j >= MAX_CTL_CHECK) { 1388 if (j >= MAX_CTL_CHECK) {
1371 if (printk_ratelimit()) 1389 if (printk_ratelimit())
1372 dev_err(&ha->pdev->dev, 1390 dev_err(&ha->pdev->dev,
1373 "failed to write through agent\n"); 1391 "failed to write through agent.\n");
1374 ret = -1; 1392 ret = -1;
1375 break; 1393 break;
1376 } 1394 }
@@ -1460,7 +1478,7 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1460 if (j >= MAX_CTL_CHECK) { 1478 if (j >= MAX_CTL_CHECK) {
1461 if (printk_ratelimit()) 1479 if (printk_ratelimit())
1462 dev_err(&ha->pdev->dev, 1480 dev_err(&ha->pdev->dev,
1463 "failed to read through agent\n"); 1481 "failed to read through agent.\n");
1464 break; 1482 break;
1465 } 1483 }
1466 1484
@@ -1633,17 +1651,15 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1633 uint32_t len = 0; 1651 uint32_t len = 0;
1634 1652
1635 if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { 1653 if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1636 qla_printk(KERN_WARNING, ha, 1654 ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
1637 "Failed to reserve selected regions (%s)\n", 1655 "Failed to reserver selected regions.\n");
1638 pci_name(ha->pdev));
1639 goto iospace_error_exit; 1656 goto iospace_error_exit;
1640 } 1657 }
1641 1658
1642 /* Use MMIO operations for all accesses. */ 1659 /* Use MMIO operations for all accesses. */
1643 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { 1660 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1644 qla_printk(KERN_ERR, ha, 1661 ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
1645 "region #0 not an MMIO resource (%s), aborting\n", 1662 "Region #0 not an MMIO resource, aborting.\n");
1646 pci_name(ha->pdev));
1647 goto iospace_error_exit; 1663 goto iospace_error_exit;
1648 } 1664 }
1649 1665
@@ -1651,9 +1667,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1651 ha->nx_pcibase = 1667 ha->nx_pcibase =
1652 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len); 1668 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1653 if (!ha->nx_pcibase) { 1669 if (!ha->nx_pcibase) {
1654 qla_printk(KERN_ERR, ha, 1670 ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
1655 "cannot remap pcibase MMIO (%s), aborting\n", 1671 "Cannot remap pcibase MMIO, aborting.\n");
1656 pci_name(ha->pdev));
1657 pci_release_regions(ha->pdev); 1672 pci_release_regions(ha->pdev);
1658 goto iospace_error_exit; 1673 goto iospace_error_exit;
1659 } 1674 }
@@ -1667,9 +1682,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1667 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) + 1682 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1668 (ha->pdev->devfn << 12)), 4); 1683 (ha->pdev->devfn << 12)), 4);
1669 if (!ha->nxdb_wr_ptr) { 1684 if (!ha->nxdb_wr_ptr) {
1670 qla_printk(KERN_ERR, ha, 1685 ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
1671 "cannot remap MMIO (%s), aborting\n", 1686 "Cannot remap MMIO, aborting.\n");
1672 pci_name(ha->pdev));
1673 pci_release_regions(ha->pdev); 1687 pci_release_regions(ha->pdev);
1674 goto iospace_error_exit; 1688 goto iospace_error_exit;
1675 } 1689 }
@@ -1687,6 +1701,16 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1687 1701
1688 ha->max_req_queues = ha->max_rsp_queues = 1; 1702 ha->max_req_queues = ha->max_rsp_queues = 1;
1689 ha->msix_count = ha->max_rsp_queues + 1; 1703 ha->msix_count = ha->max_rsp_queues + 1;
1704 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
1705 "nx_pci_base=%p iobase=%p "
1706 "max_req_queues=%d msix_count=%d.\n",
1707 ha->nx_pcibase, ha->iobase,
1708 ha->max_req_queues, ha->msix_count);
1709 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
1710 "nx_pci_base=%p iobase=%p "
1711 "max_req_queues=%d msix_count=%d.\n",
1712 ha->nx_pcibase, ha->iobase,
1713 ha->max_req_queues, ha->msix_count);
1690 return 0; 1714 return 0;
1691 1715
1692iospace_error_exit: 1716iospace_error_exit:
@@ -1712,6 +1736,9 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
1712 pci_set_master(ha->pdev); 1736 pci_set_master(ha->pdev);
1713 ret = pci_set_mwi(ha->pdev); 1737 ret = pci_set_mwi(ha->pdev);
1714 ha->chip_revision = ha->pdev->revision; 1738 ha->chip_revision = ha->pdev->revision;
1739 ql_dbg(ql_dbg_init, vha, 0x0043,
1740 "Chip revision:%ld.\n",
1741 ha->chip_revision);
1715 return 0; 1742 return 0;
1716} 1743}
1717 1744
@@ -1877,6 +1904,7 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1877{ 1904{
1878 u32 val = 0; 1905 u32 val = 0;
1879 int retries = 60; 1906 int retries = 60;
1907 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1880 1908
1881 do { 1909 do {
1882 read_lock(&ha->hw_lock); 1910 read_lock(&ha->hw_lock);
@@ -1892,15 +1920,15 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1892 default: 1920 default:
1893 break; 1921 break;
1894 } 1922 }
1895 qla_printk(KERN_WARNING, ha, 1923 ql_log(ql_log_info, vha, 0x00a8,
1896 "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n", 1924 "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
1897 val, retries); 1925 val, retries);
1898 1926
1899 msleep(500); 1927 msleep(500);
1900 1928
1901 } while (--retries); 1929 } while (--retries);
1902 1930
1903 qla_printk(KERN_INFO, ha, 1931 ql_log(ql_log_fatal, vha, 0x00a9,
1904 "Cmd Peg initialization failed: 0x%x.\n", val); 1932 "Cmd Peg initialization failed: 0x%x.\n", val);
1905 1933
1906 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); 1934 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1915,6 +1943,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1915{ 1943{
1916 u32 val = 0; 1944 u32 val = 0;
1917 int retries = 60; 1945 int retries = 60;
1946 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1918 1947
1919 do { 1948 do {
1920 read_lock(&ha->hw_lock); 1949 read_lock(&ha->hw_lock);
@@ -1930,17 +1959,16 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1930 default: 1959 default:
1931 break; 1960 break;
1932 } 1961 }
1933 1962 ql_log(ql_log_info, vha, 0x00ab,
1934 qla_printk(KERN_WARNING, ha, 1963 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
1935 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n", 1964 val, retries);
1936 val, retries);
1937 1965
1938 msleep(500); 1966 msleep(500);
1939 1967
1940 } while (--retries); 1968 } while (--retries);
1941 1969
1942 qla_printk(KERN_INFO, ha, 1970 ql_log(ql_log_fatal, vha, 0x00ac,
1943 "Rcv Peg initialization failed: 0x%x.\n", val); 1971 "Rcv Peg initializatin failed: 0x%x.\n", val);
1944 read_lock(&ha->hw_lock); 1972 read_lock(&ha->hw_lock);
1945 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); 1973 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1946 read_unlock(&ha->hw_lock); 1974 read_unlock(&ha->hw_lock);
@@ -1989,13 +2017,11 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1989 } 2017 }
1990 2018
1991 if (ha->mcp) { 2019 if (ha->mcp) {
1992 DEBUG3_11(printk(KERN_INFO "%s(%ld): " 2020 ql_dbg(ql_dbg_async, vha, 0x5052,
1993 "Got mailbox completion. cmd=%x.\n", 2021 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1994 __func__, vha->host_no, ha->mcp->mb[0]));
1995 } else { 2022 } else {
1996 qla_printk(KERN_INFO, ha, 2023 ql_dbg(ql_dbg_async, vha, 0x5053,
1997 "%s(%ld): MBX pointer ERROR!\n", 2024 "MBX pointer ERROR.\n");
1998 __func__, vha->host_no);
1999 } 2025 }
2000} 2026}
2001 2027
@@ -2019,13 +2045,13 @@ qla82xx_intr_handler(int irq, void *dev_id)
2019 int status = 0, status1 = 0; 2045 int status = 0, status1 = 0;
2020 unsigned long flags; 2046 unsigned long flags;
2021 unsigned long iter; 2047 unsigned long iter;
2022 uint32_t stat; 2048 uint32_t stat = 0;
2023 uint16_t mb[4]; 2049 uint16_t mb[4];
2024 2050
2025 rsp = (struct rsp_que *) dev_id; 2051 rsp = (struct rsp_que *) dev_id;
2026 if (!rsp) { 2052 if (!rsp) {
2027 printk(KERN_INFO 2053 printk(KERN_INFO
2028 "%s(): NULL response queue pointer\n", __func__); 2054 "%s(): NULL response queue pointer.\n", __func__);
2029 return IRQ_NONE; 2055 return IRQ_NONE;
2030 } 2056 }
2031 ha = rsp->hw; 2057 ha = rsp->hw;
@@ -2075,9 +2101,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
2075 qla24xx_process_response_queue(vha, rsp); 2101 qla24xx_process_response_queue(vha, rsp);
2076 break; 2102 break;
2077 default: 2103 default:
2078 DEBUG2(printk("scsi(%ld): " 2104 ql_dbg(ql_dbg_async, vha, 0x5054,
2079 " Unrecognized interrupt type (%d).\n", 2105 "Unrecognized interrupt type (%d).\n",
2080 vha->host_no, stat & 0xff)); 2106 stat & 0xff);
2081 break; 2107 break;
2082 } 2108 }
2083 } 2109 }
@@ -2089,8 +2115,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
2089 2115
2090#ifdef QL_DEBUG_LEVEL_17 2116#ifdef QL_DEBUG_LEVEL_17
2091 if (!irq && ha->flags.eeh_busy) 2117 if (!irq && ha->flags.eeh_busy)
2092 qla_printk(KERN_WARNING, ha, 2118 ql_log(ql_log_warn, vha, 0x503d,
2093 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2119 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2094 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2120 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2095#endif 2121#endif
2096 2122
@@ -2111,13 +2137,13 @@ qla82xx_msix_default(int irq, void *dev_id)
2111 struct device_reg_82xx __iomem *reg; 2137 struct device_reg_82xx __iomem *reg;
2112 int status = 0; 2138 int status = 0;
2113 unsigned long flags; 2139 unsigned long flags;
2114 uint32_t stat; 2140 uint32_t stat = 0;
2115 uint16_t mb[4]; 2141 uint16_t mb[4];
2116 2142
2117 rsp = (struct rsp_que *) dev_id; 2143 rsp = (struct rsp_que *) dev_id;
2118 if (!rsp) { 2144 if (!rsp) {
2119 printk(KERN_INFO 2145 printk(KERN_INFO
2120 "%s(): NULL response queue pointer\n", __func__); 2146 "%s(): NULL response queue pointer.\n", __func__);
2121 return IRQ_NONE; 2147 return IRQ_NONE;
2122 } 2148 }
2123 ha = rsp->hw; 2149 ha = rsp->hw;
@@ -2149,9 +2175,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2149 qla24xx_process_response_queue(vha, rsp); 2175 qla24xx_process_response_queue(vha, rsp);
2150 break; 2176 break;
2151 default: 2177 default:
2152 DEBUG2(printk("scsi(%ld): " 2178 ql_dbg(ql_dbg_async, vha, 0x5041,
2153 " Unrecognized interrupt type (%d).\n", 2179 "Unrecognized interrupt type (%d).\n",
2154 vha->host_no, stat & 0xff)); 2180 stat & 0xff);
2155 break; 2181 break;
2156 } 2182 }
2157 } 2183 }
@@ -2162,9 +2188,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2162 2188
2163#ifdef QL_DEBUG_LEVEL_17 2189#ifdef QL_DEBUG_LEVEL_17
2164 if (!irq && ha->flags.eeh_busy) 2190 if (!irq && ha->flags.eeh_busy)
2165 qla_printk(KERN_WARNING, ha, 2191 ql_log(ql_log_warn, vha, 0x5044,
2166 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2192 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2167 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2193 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2168#endif 2194#endif
2169 2195
2170 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2196 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
@@ -2186,7 +2212,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
2186 rsp = (struct rsp_que *) dev_id; 2212 rsp = (struct rsp_que *) dev_id;
2187 if (!rsp) { 2213 if (!rsp) {
2188 printk(KERN_INFO 2214 printk(KERN_INFO
2189 "%s(): NULL response queue pointer\n", __func__); 2215 "%s(): NULL response queue pointer.\n", __func__);
2190 return IRQ_NONE; 2216 return IRQ_NONE;
2191 } 2217 }
2192 2218
@@ -2215,7 +2241,7 @@ qla82xx_poll(int irq, void *dev_id)
2215 rsp = (struct rsp_que *) dev_id; 2241 rsp = (struct rsp_que *) dev_id;
2216 if (!rsp) { 2242 if (!rsp) {
2217 printk(KERN_INFO 2243 printk(KERN_INFO
2218 "%s(): NULL response queue pointer\n", __func__); 2244 "%s(): NULL response queue pointer.\n", __func__);
2219 return; 2245 return;
2220 } 2246 }
2221 ha = rsp->hw; 2247 ha = rsp->hw;
@@ -2245,9 +2271,9 @@ qla82xx_poll(int irq, void *dev_id)
2245 qla24xx_process_response_queue(vha, rsp); 2271 qla24xx_process_response_queue(vha, rsp);
2246 break; 2272 break;
2247 default: 2273 default:
2248 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2274 ql_dbg(ql_dbg_p3p, vha, 0xb013,
2249 "(%d).\n", 2275 "Unrecognized interrupt type (%d).\n",
2250 vha->host_no, stat & 0xff)); 2276 stat & 0xff);
2251 break; 2277 break;
2252 } 2278 }
2253 } 2279 }
@@ -2347,9 +2373,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
2347 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2373 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2348 } 2374 }
2349 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2375 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2350 qla_printk(KERN_INFO, ha, 2376 ql_log(ql_log_info, vha, 0x00bb,
2351 "%s(%ld):drv_state = 0x%x\n", 2377 "drv_state = 0x%x.\n", drv_state);
2352 __func__, vha->host_no, drv_state);
2353 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2378 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2354} 2379}
2355 2380
@@ -2392,8 +2417,8 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2392 struct qla_hw_data *ha = vha->hw; 2417 struct qla_hw_data *ha = vha->hw;
2393 2418
2394 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { 2419 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2395 qla_printk(KERN_ERR, ha, 2420 ql_log(ql_log_fatal, vha, 0x009f,
2396 "%s: Error during CRB Initialization\n", __func__); 2421 "Error during CRB initialization.\n");
2397 return QLA_FUNCTION_FAILED; 2422 return QLA_FUNCTION_FAILED;
2398 } 2423 }
2399 udelay(500); 2424 udelay(500);
@@ -2411,27 +2436,27 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2411 if (ql2xfwloadbin == 2) 2436 if (ql2xfwloadbin == 2)
2412 goto try_blob_fw; 2437 goto try_blob_fw;
2413 2438
2414 qla_printk(KERN_INFO, ha, 2439 ql_log(ql_log_info, vha, 0x00a0,
2415 "Attempting to load firmware from flash\n"); 2440 "Attempting to load firmware from flash.\n");
2416 2441
2417 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2442 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2418 qla_printk(KERN_ERR, ha, 2443 ql_log(ql_log_info, vha, 0x00a1,
2419 "Firmware loaded successfully from flash\n"); 2444 "Firmware loaded successully from flash.\n");
2420 return QLA_SUCCESS; 2445 return QLA_SUCCESS;
2421 } else { 2446 } else {
2422 qla_printk(KERN_ERR, ha, 2447 ql_log(ql_log_warn, vha, 0x0108,
2423 "Firmware load from flash failed\n"); 2448 "Firmware load from flash failed.\n");
2424 } 2449 }
2425 2450
2426try_blob_fw: 2451try_blob_fw:
2427 qla_printk(KERN_INFO, ha, 2452 ql_log(ql_log_info, vha, 0x00a2,
2428 "Attempting to load firmware from blob\n"); 2453 "Attempting to load firmware from blob.\n");
2429 2454
2430 /* Load firmware blob. */ 2455 /* Load firmware blob. */
2431 blob = ha->hablob = qla2x00_request_firmware(vha); 2456 blob = ha->hablob = qla2x00_request_firmware(vha);
2432 if (!blob) { 2457 if (!blob) {
2433 qla_printk(KERN_ERR, ha, 2458 ql_log(ql_log_fatal, vha, 0x00a3,
2434 "Firmware image not present.\n"); 2459 "Firmware image not preset.\n");
2435 goto fw_load_failed; 2460 goto fw_load_failed;
2436 } 2461 }
2437 2462
@@ -2441,20 +2466,19 @@ try_blob_fw:
2441 /* Fallback to URI format */ 2466 /* Fallback to URI format */
2442 if (qla82xx_validate_firmware_blob(vha, 2467 if (qla82xx_validate_firmware_blob(vha,
2443 QLA82XX_UNIFIED_ROMIMAGE)) { 2468 QLA82XX_UNIFIED_ROMIMAGE)) {
2444 qla_printk(KERN_ERR, ha, 2469 ql_log(ql_log_fatal, vha, 0x00a4,
2445 "No valid firmware image found!!!"); 2470 "No valid firmware image found.\n");
2446 return QLA_FUNCTION_FAILED; 2471 return QLA_FUNCTION_FAILED;
2447 } 2472 }
2448 } 2473 }
2449 2474
2450 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { 2475 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2451 qla_printk(KERN_ERR, ha, 2476 ql_log(ql_log_info, vha, 0x00a5,
2452 "%s: Firmware loaded successfully " 2477 "Firmware loaded successfully from binary blob.\n");
2453 " from binary blob\n", __func__);
2454 return QLA_SUCCESS; 2478 return QLA_SUCCESS;
2455 } else { 2479 } else {
2456 qla_printk(KERN_ERR, ha, 2480 ql_log(ql_log_fatal, vha, 0x00a6,
2457 "Firmware load failed from binary blob\n"); 2481 "Firmware load failed for binary blob.\n");
2458 blob->fw = NULL; 2482 blob->fw = NULL;
2459 blob = NULL; 2483 blob = NULL;
2460 goto fw_load_failed; 2484 goto fw_load_failed;
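The hunks above only reword the messages in qla82xx_load_fw(); the load order itself is unchanged: unless ql2xfwloadbin forces the blob path, flash is tried first, and the request_firmware() blob (with a fallback to the unified ROM image format) is used only if the flash load fails. A condensed sketch of that selection; the helper name is illustrative and not part of the driver:

static int example_load_fw_order(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* ql2xfwloadbin == 2 forces the request_firmware() blob path;
	 * otherwise the flash copy is attempted first. */
	if (ql2xfwloadbin != 2 &&
	    qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS)
		return QLA_SUCCESS;

	ha->hablob = qla2x00_request_firmware(vha);
	if (!ha->hablob)
		return QLA_FUNCTION_FAILED;

	return (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
}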
@@ -2486,15 +2510,15 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2486 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); 2510 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2487 2511
2488 if (qla82xx_load_fw(vha) != QLA_SUCCESS) { 2512 if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2489 qla_printk(KERN_INFO, ha, 2513 ql_log(ql_log_fatal, vha, 0x00a7,
2490 "%s: Error trying to start fw!\n", __func__); 2514 "Error trying to start fw.\n");
2491 return QLA_FUNCTION_FAILED; 2515 return QLA_FUNCTION_FAILED;
2492 } 2516 }
2493 2517
2494 /* Handshake with the card before we register the devices. */ 2518 /* Handshake with the card before we register the devices. */
2495 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { 2519 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2496 qla_printk(KERN_INFO, ha, 2520 ql_log(ql_log_fatal, vha, 0x00aa,
2497 "%s: Error during card handshake!\n", __func__); 2521 "Error during card handshake.\n");
2498 return QLA_FUNCTION_FAILED; 2522 return QLA_FUNCTION_FAILED;
2499 } 2523 }
2500 2524
@@ -2663,8 +2687,11 @@ qla82xx_start_scsi(srb_t *sp)
2663 /* Send marker if required */ 2687 /* Send marker if required */
2664 if (vha->marker_needed != 0) { 2688 if (vha->marker_needed != 0) {
2665 if (qla2x00_marker(vha, req, 2689 if (qla2x00_marker(vha, req,
2666 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 2690 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2691 ql_log(ql_log_warn, vha, 0x300c,
2692 "qla2x00_marker failed for cmd=%p.\n", cmd);
2667 return QLA_FUNCTION_FAILED; 2693 return QLA_FUNCTION_FAILED;
2694 }
2668 vha->marker_needed = 0; 2695 vha->marker_needed = 0;
2669 } 2696 }
2670 2697
@@ -2701,8 +2728,13 @@ qla82xx_start_scsi(srb_t *sp)
2701 uint16_t i; 2728 uint16_t i;
2702 2729
2703 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds); 2730 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2704 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) 2731 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2732 ql_dbg(ql_dbg_io, vha, 0x300d,
2733 "Num of DSD list %d is than %d for cmd=%p.\n",
2734 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2735 cmd);
2705 goto queuing_error; 2736 goto queuing_error;
2737 }
2706 2738
2707 if (more_dsd_lists <= ha->gbl_dsd_avail) 2739 if (more_dsd_lists <= ha->gbl_dsd_avail)
2708 goto sufficient_dsds; 2740 goto sufficient_dsds;
@@ -2711,13 +2743,20 @@ qla82xx_start_scsi(srb_t *sp)
2711 2743
2712 for (i = 0; i < more_dsd_lists; i++) { 2744 for (i = 0; i < more_dsd_lists; i++) {
2713 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 2745 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2714 if (!dsd_ptr) 2746 if (!dsd_ptr) {
2747 ql_log(ql_log_fatal, vha, 0x300e,
2748 "Failed to allocate memory for dsd_dma "
2749 "for cmd=%p.\n", cmd);
2715 goto queuing_error; 2750 goto queuing_error;
2751 }
2716 2752
2717 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 2753 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2718 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 2754 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2719 if (!dsd_ptr->dsd_addr) { 2755 if (!dsd_ptr->dsd_addr) {
2720 kfree(dsd_ptr); 2756 kfree(dsd_ptr);
2757 ql_log(ql_log_fatal, vha, 0x300f,
2758 "Failed to allocate memory for dsd_addr "
2759 "for cmd=%p.\n", cmd);
2721 goto queuing_error; 2760 goto queuing_error;
2722 } 2761 }
2723 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 2762 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
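The allocation loop above tops up the adapter-wide DSD chain pool when a command needs more chains than are currently free: each shortfall entry gets a struct dsd_dma from kzalloc() and a chain buffer from dl_dma_pool, and is parked on ha->gbl_dsd_list. A simplified sketch of that top-up follows; the helper name is illustrative and the avail/inuse bookkeeping is condensed.

static int example_grow_dsd_pool(struct qla_hw_data *ha, uint16_t shortfall)
{
	uint16_t i;

	for (i = 0; i < shortfall; i++) {
		struct dsd_dma *dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);

		if (!dsd)
			return -ENOMEM;
		dsd->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
		    &dsd->dsd_list_dma);
		if (!dsd->dsd_addr) {
			kfree(dsd);
			return -ENOMEM;
		}
		/* Park the new chain on the adapter-wide free list. */
		list_add_tail(&dsd->list, &ha->gbl_dsd_list);
		ha->gbl_dsd_avail++;
	}
	return 0;
}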
@@ -2742,17 +2781,16 @@ sufficient_dsds:
2742 2781
2743 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2782 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2744 if (!sp->ctx) { 2783 if (!sp->ctx) {
2745 DEBUG(printk(KERN_INFO 2784 ql_log(ql_log_fatal, vha, 0x3010,
2746 "%s(%ld): failed to allocate" 2785 "Failed to allocate ctx for cmd=%p.\n", cmd);
2747 " ctx.\n", __func__, vha->host_no));
2748 goto queuing_error; 2786 goto queuing_error;
2749 } 2787 }
2750 memset(ctx, 0, sizeof(struct ct6_dsd)); 2788 memset(ctx, 0, sizeof(struct ct6_dsd));
2751 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2789 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2752 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2790 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2753 if (!ctx->fcp_cmnd) { 2791 if (!ctx->fcp_cmnd) {
2754 DEBUG2_3(printk("%s(%ld): failed to allocate" 2792 ql_log(ql_log_fatal, vha, 0x3011,
2755 " fcp_cmnd.\n", __func__, vha->host_no)); 2793 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2756 goto queuing_error_fcp_cmnd; 2794 goto queuing_error_fcp_cmnd;
2757 } 2795 }
2758 2796
@@ -2766,6 +2804,9 @@ sufficient_dsds:
2766 /* SCSI command bigger than 16 bytes must be 2804 /* SCSI command bigger than 16 bytes must be
2767 * multiple of 4 2805 * multiple of 4
2768 */ 2806 */
2807 ql_log(ql_log_warn, vha, 0x3012,
2808 "scsi cmd len %d not multiple of 4 "
2809 "for cmd=%p.\n", cmd->cmd_len, cmd);
2769 goto queuing_error_fcp_cmnd; 2810 goto queuing_error_fcp_cmnd;
2770 } 2811 }
2771 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 2812 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
@@ -2845,7 +2886,7 @@ sufficient_dsds:
2845 cmd_pkt->entry_status = (uint8_t) rsp->id; 2886 cmd_pkt->entry_status = (uint8_t) rsp->id;
2846 } else { 2887 } else {
2847 struct cmd_type_7 *cmd_pkt; 2888 struct cmd_type_7 *cmd_pkt;
2848 req_cnt = qla24xx_calc_iocbs(tot_dsds); 2889 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2849 if (req->cnt < (req_cnt + 2)) { 2890 if (req->cnt < (req_cnt + 2)) {
2850 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2891 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2851 &reg->req_q_out[0]); 2892 &reg->req_q_out[0]);
@@ -2979,8 +3020,8 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2979 /* Dword reads to flash. */ 3020 /* Dword reads to flash. */
2980 for (i = 0; i < length/4; i++, faddr += 4) { 3021 for (i = 0; i < length/4; i++, faddr += 4) {
2981 if (qla82xx_rom_fast_read(ha, faddr, &val)) { 3022 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2982 qla_printk(KERN_WARNING, ha, 3023 ql_log(ql_log_warn, vha, 0x0106,
2983 "Do ROM fast read failed\n"); 3024 "Do ROM fast read failed.\n");
2984 goto done_read; 3025 goto done_read;
2985 } 3026 }
2986 dwptr[i] = __constant_cpu_to_le32(val); 3027 dwptr[i] = __constant_cpu_to_le32(val);
@@ -2994,10 +3035,12 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
2994{ 3035{
2995 int ret; 3036 int ret;
2996 uint32_t val; 3037 uint32_t val;
3038 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2997 3039
2998 ret = ql82xx_rom_lock_d(ha); 3040 ret = ql82xx_rom_lock_d(ha);
2999 if (ret < 0) { 3041 if (ret < 0) {
3000 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3042 ql_log(ql_log_warn, vha, 0xb014,
3043 "ROM Lock failed.\n");
3001 return ret; 3044 return ret;
3002 } 3045 }
3003 3046
@@ -3013,7 +3056,8 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
3013 } 3056 }
3014 3057
3015 if (qla82xx_write_disable_flash(ha) != 0) 3058 if (qla82xx_write_disable_flash(ha) != 0)
3016 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3059 ql_log(ql_log_warn, vha, 0xb015,
3060 "Write disable failed.\n");
3017 3061
3018done_unprotect: 3062done_unprotect:
3019 qla82xx_rom_unlock(ha); 3063 qla82xx_rom_unlock(ha);
@@ -3025,10 +3069,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3025{ 3069{
3026 int ret; 3070 int ret;
3027 uint32_t val; 3071 uint32_t val;
3072 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3028 3073
3029 ret = ql82xx_rom_lock_d(ha); 3074 ret = ql82xx_rom_lock_d(ha);
3030 if (ret < 0) { 3075 if (ret < 0) {
3031 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3076 ql_log(ql_log_warn, vha, 0xb016,
3077 "ROM Lock failed.\n");
3032 return ret; 3078 return ret;
3033 } 3079 }
3034 3080
@@ -3040,10 +3086,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3040 /* LOCK all sectors */ 3086 /* LOCK all sectors */
3041 ret = qla82xx_write_status_reg(ha, val); 3087 ret = qla82xx_write_status_reg(ha, val);
3042 if (ret < 0) 3088 if (ret < 0)
3043 qla_printk(KERN_WARNING, ha, "Write status register failed\n"); 3089 ql_log(ql_log_warn, vha, 0xb017,
3090 "Write status register failed.\n");
3044 3091
3045 if (qla82xx_write_disable_flash(ha) != 0) 3092 if (qla82xx_write_disable_flash(ha) != 0)
3046 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3093 ql_log(ql_log_warn, vha, 0xb018,
3094 "Write disable failed.\n");
3047done_protect: 3095done_protect:
3048 qla82xx_rom_unlock(ha); 3096 qla82xx_rom_unlock(ha);
3049 return ret; 3097 return ret;
@@ -3053,10 +3101,12 @@ static int
3053qla82xx_erase_sector(struct qla_hw_data *ha, int addr) 3101qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3054{ 3102{
3055 int ret = 0; 3103 int ret = 0;
3104 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3056 3105
3057 ret = ql82xx_rom_lock_d(ha); 3106 ret = ql82xx_rom_lock_d(ha);
3058 if (ret < 0) { 3107 if (ret < 0) {
3059 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3108 ql_log(ql_log_warn, vha, 0xb019,
3109 "ROM Lock failed.\n");
3060 return ret; 3110 return ret;
3061 } 3111 }
3062 3112
@@ -3066,8 +3116,8 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3066 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE); 3116 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3067 3117
3068 if (qla82xx_wait_rom_done(ha)) { 3118 if (qla82xx_wait_rom_done(ha)) {
3069 qla_printk(KERN_WARNING, ha, 3119 ql_log(ql_log_warn, vha, 0xb01a,
3070 "Error waiting for rom done\n"); 3120 "Error waiting for rom done.\n");
3071 ret = -1; 3121 ret = -1;
3072 goto done; 3122 goto done;
3073 } 3123 }
@@ -3110,10 +3160,10 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3110 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 3160 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3111 &optrom_dma, GFP_KERNEL); 3161 &optrom_dma, GFP_KERNEL);
3112 if (!optrom) { 3162 if (!optrom) {
3113 qla_printk(KERN_DEBUG, ha, 3163 ql_log(ql_log_warn, vha, 0xb01b,
3114 "Unable to allocate memory for optrom " 3164 "Unable to allocate memory "
3115 "burst write (%x KB).\n", 3165 "for optron burst write (%x KB).\n",
3116 OPTROM_BURST_SIZE / 1024); 3166 OPTROM_BURST_SIZE / 1024);
3117 } 3167 }
3118 } 3168 }
3119 3169
@@ -3122,8 +3172,8 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3122 3172
3123 ret = qla82xx_unprotect_flash(ha); 3173 ret = qla82xx_unprotect_flash(ha);
3124 if (ret) { 3174 if (ret) {
3125 qla_printk(KERN_WARNING, ha, 3175 ql_log(ql_log_warn, vha, 0xb01c,
3126 "Unable to unprotect flash for update.\n"); 3176 "Unable to unprotect flash for update.\n");
3127 goto write_done; 3177 goto write_done;
3128 } 3178 }
3129 3179
@@ -3133,9 +3183,9 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3133 3183
3134 ret = qla82xx_erase_sector(ha, faddr); 3184 ret = qla82xx_erase_sector(ha, faddr);
3135 if (ret) { 3185 if (ret) {
3136 DEBUG9(qla_printk(KERN_ERR, ha, 3186 ql_log(ql_log_warn, vha, 0xb01d,
3137 "Unable to erase sector: " 3187 "Unable to erase sector: address=%x.\n",
3138 "address=%x.\n", faddr)); 3188 faddr);
3139 break; 3189 break;
3140 } 3190 }
3141 } 3191 }
@@ -3149,12 +3199,12 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3149 (ha->flash_data_off | faddr), 3199 (ha->flash_data_off | faddr),
3150 OPTROM_BURST_DWORDS); 3200 OPTROM_BURST_DWORDS);
3151 if (ret != QLA_SUCCESS) { 3201 if (ret != QLA_SUCCESS) {
3152 qla_printk(KERN_WARNING, ha, 3202 ql_log(ql_log_warn, vha, 0xb01e,
3153 "Unable to burst-write optrom segment " 3203 "Unable to burst-write optrom segment "
3154 "(%x/%x/%llx).\n", ret, 3204 "(%x/%x/%llx).\n", ret,
3155 (ha->flash_data_off | faddr), 3205 (ha->flash_data_off | faddr),
3156 (unsigned long long)optrom_dma); 3206 (unsigned long long)optrom_dma);
3157 qla_printk(KERN_WARNING, ha, 3207 ql_log(ql_log_warn, vha, 0xb01f,
3158 "Reverting to slow-write.\n"); 3208 "Reverting to slow-write.\n");
3159 3209
3160 dma_free_coherent(&ha->pdev->dev, 3210 dma_free_coherent(&ha->pdev->dev,
@@ -3171,16 +3221,16 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3171 ret = qla82xx_write_flash_dword(ha, faddr, 3221 ret = qla82xx_write_flash_dword(ha, faddr,
3172 cpu_to_le32(*dwptr)); 3222 cpu_to_le32(*dwptr));
3173 if (ret) { 3223 if (ret) {
3174 DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program" 3224 ql_dbg(ql_dbg_p3p, vha, 0xb020,
3175 "flash address=%x data=%x.\n", __func__, 3225 "Unable to program flash address=%x data=%x.\n",
3176 ha->host_no, faddr, *dwptr)); 3226 faddr, *dwptr);
3177 break; 3227 break;
3178 } 3228 }
3179 } 3229 }
3180 3230
3181 ret = qla82xx_protect_flash(ha); 3231 ret = qla82xx_protect_flash(ha);
3182 if (ret) 3232 if (ret)
3183 qla_printk(KERN_WARNING, ha, 3233 ql_log(ql_log_warn, vha, 0xb021,
3184 "Unable to protect flash after update.\n"); 3234 "Unable to protect flash after update.\n");
3185write_done: 3235write_done:
3186 if (optrom) 3236 if (optrom)
@@ -3244,9 +3294,12 @@ qla82xx_start_iocbs(srb_t *sp)
3244 3294
3245void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) 3295void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3246{ 3296{
3297 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3298
3247 if (qla82xx_rom_lock(ha)) 3299 if (qla82xx_rom_lock(ha))
3248 /* Someone else is holding the lock. */ 3300 /* Someone else is holding the lock. */
3249 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n"); 3301 ql_log(ql_log_info, vha, 0xb022,
3302 "Resetting rom_lock.\n");
3250 3303
3251 /* 3304 /*
3252 * Either we got the lock, or someone 3305 * Either we got the lock, or someone
@@ -3313,7 +3366,8 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3313 3366
3314dev_initialize: 3367dev_initialize:
3315 /* set to DEV_INITIALIZING */ 3368 /* set to DEV_INITIALIZING */
3316 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); 3369 ql_log(ql_log_info, vha, 0x009e,
3370 "HW State: INITIALIZING.\n");
3317 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); 3371 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3318 3372
3319 /* Driver that sets device state to initializating sets IDC version */ 3373 /* Driver that sets device state to initializating sets IDC version */
@@ -3324,14 +3378,16 @@ dev_initialize:
3324 qla82xx_idc_lock(ha); 3378 qla82xx_idc_lock(ha);
3325 3379
3326 if (rval != QLA_SUCCESS) { 3380 if (rval != QLA_SUCCESS) {
3327 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 3381 ql_log(ql_log_fatal, vha, 0x00ad,
3382 "HW State: FAILED.\n");
3328 qla82xx_clear_drv_active(ha); 3383 qla82xx_clear_drv_active(ha);
3329 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); 3384 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3330 return rval; 3385 return rval;
3331 } 3386 }
3332 3387
3333dev_ready: 3388dev_ready:
3334 qla_printk(KERN_INFO, ha, "HW State: READY\n"); 3389 ql_log(ql_log_info, vha, 0x00ae,
3390 "HW State: READY.\n");
3335 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 3391 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3336 3392
3337 return QLA_SUCCESS; 3393 return QLA_SUCCESS;
@@ -3376,15 +3432,15 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3376 /* quiescence timeout, other functions didn't ack 3432 /* quiescence timeout, other functions didn't ack
3377 * changing the state to DEV_READY 3433 * changing the state to DEV_READY
3378 */ 3434 */
3379 qla_printk(KERN_INFO, ha, 3435 ql_log(ql_log_info, vha, 0xb023,
3380 "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME); 3436 "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
3381 qla_printk(KERN_INFO, ha, 3437 ql_log(ql_log_info, vha, 0xb024,
3382 "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active, 3438 "DRV_ACTIVE:%d DRV_STATE:%d.\n",
3383 drv_state); 3439 drv_active, drv_state);
3384 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3440 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3385 QLA82XX_DEV_READY); 3441 QLA82XX_DEV_READY);
3386 qla_printk(KERN_INFO, ha, 3442 ql_log(ql_log_info, vha, 0xb025,
3387 "HW State: DEV_READY\n"); 3443 "HW State: DEV_READY.\n");
3388 qla82xx_idc_unlock(ha); 3444 qla82xx_idc_unlock(ha);
3389 qla2x00_perform_loop_resync(vha); 3445 qla2x00_perform_loop_resync(vha);
3390 qla82xx_idc_lock(ha); 3446 qla82xx_idc_lock(ha);
@@ -3404,7 +3460,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3404 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3460 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3405 /* everyone acked so set the state to DEV_QUIESCENCE */ 3461 /* everyone acked so set the state to DEV_QUIESCENCE */
3406 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { 3462 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3407 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n"); 3463 ql_log(ql_log_info, vha, 0xb026,
3464 "HW State: DEV_QUIESCENT.\n");
3408 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); 3465 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3409 } 3466 }
3410} 3467}
@@ -3441,7 +3498,8 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3441 struct qla_hw_data *ha = vha->hw; 3498 struct qla_hw_data *ha = vha->hw;
3442 3499
3443 /* Disable the board */ 3500 /* Disable the board */
3444 qla_printk(KERN_INFO, ha, "Disabling the board\n"); 3501 ql_log(ql_log_fatal, vha, 0x00b8,
3502 "Disabling the board.\n");
3445 3503
3446 qla82xx_idc_lock(ha); 3504 qla82xx_idc_lock(ha);
3447 qla82xx_clear_drv_active(ha); 3505 qla82xx_clear_drv_active(ha);
@@ -3492,8 +3550,8 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3492 3550
3493 while (drv_state != drv_active) { 3551 while (drv_state != drv_active) {
3494 if (time_after_eq(jiffies, reset_timeout)) { 3552 if (time_after_eq(jiffies, reset_timeout)) {
3495 qla_printk(KERN_INFO, ha, 3553 ql_log(ql_log_warn, vha, 0x00b5,
3496 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME); 3554 "Reset timeout.\n");
3497 break; 3555 break;
3498 } 3556 }
3499 qla82xx_idc_unlock(ha); 3557 qla82xx_idc_unlock(ha);
@@ -3504,12 +3562,15 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3504 } 3562 }
3505 3563
3506 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3564 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3507 qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, 3565 ql_log(ql_log_info, vha, 0x00b6,
3508 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3566 "Device state is 0x%x = %s.\n",
3567 dev_state,
3568 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3509 3569
3510 /* Force to DEV_COLD unless someone else is starting a reset */ 3570 /* Force to DEV_COLD unless someone else is starting a reset */
3511 if (dev_state != QLA82XX_DEV_INITIALIZING) { 3571 if (dev_state != QLA82XX_DEV_INITIALIZING) {
3512 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 3572 ql_log(ql_log_info, vha, 0x00b7,
3573 "HW State: COLD/RE-INIT.\n");
3513 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3574 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3514 } 3575 }
3515} 3576}
@@ -3523,8 +3584,12 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3523 fw_heartbeat_counter = qla82xx_rd_32(vha->hw, 3584 fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3524 QLA82XX_PEG_ALIVE_COUNTER); 3585 QLA82XX_PEG_ALIVE_COUNTER);
3525 /* all 0xff, assume AER/EEH in progress, ignore */ 3586 /* all 0xff, assume AER/EEH in progress, ignore */
3526 if (fw_heartbeat_counter == 0xffffffff) 3587 if (fw_heartbeat_counter == 0xffffffff) {
3588 ql_dbg(ql_dbg_timer, vha, 0x6003,
3589 "FW heartbeat counter is 0xffffffff, "
3590 "returning status=%d.\n", status);
3527 return status; 3591 return status;
3592 }
3528 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { 3593 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3529 vha->seconds_since_last_heartbeat++; 3594 vha->seconds_since_last_heartbeat++;
3530 /* FW not alive after 2 seconds */ 3595 /* FW not alive after 2 seconds */
@@ -3535,6 +3600,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3535 } else 3600 } else
3536 vha->seconds_since_last_heartbeat = 0; 3601 vha->seconds_since_last_heartbeat = 0;
3537 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3602 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3603 if (status)
3604 ql_dbg(ql_dbg_timer, vha, 0x6004,
3605 "Returning status=%d.\n", status);
3538 return status; 3606 return status;
3539} 3607}
3540 3608
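The qla82xx_check_fw_alive() changes above only add tracing; the liveness test itself is unchanged: sample QLA82XX_PEG_ALIVE_COUNTER once per timer tick, treat an all-ones reading as AER/EEH in progress, and report the firmware hung once the counter fails to advance for two consecutive samples. A condensed sketch of that logic, with an illustrative helper name:

static int example_fw_heartbeat_stalled(scsi_qla_host_t *vha)
{
	uint32_t cnt = qla82xx_rd_32(vha->hw, QLA82XX_PEG_ALIVE_COUNTER);
	int stalled = 0;

	if (cnt == 0xffffffff)
		return 0;	/* all ones: assume AER/EEH in progress */

	if (cnt == vha->fw_heartbeat_counter) {
		/* Counter did not advance since the last timer tick. */
		if (++vha->seconds_since_last_heartbeat == 2) {
			vha->seconds_since_last_heartbeat = 0;
			stalled = 1;
		}
	} else {
		vha->seconds_since_last_heartbeat = 0;
	}
	vha->fw_heartbeat_counter = cnt;
	return stalled;
}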
@@ -3565,8 +3633,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3565 3633
3566 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3634 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3567 old_dev_state = dev_state; 3635 old_dev_state = dev_state;
3568 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 3636 ql_log(ql_log_info, vha, 0x009b,
3569 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3637 "Device state is 0x%x = %s.\n",
3638 dev_state,
3639 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3570 3640
3571 /* wait for 30 seconds for device to go ready */ 3641 /* wait for 30 seconds for device to go ready */
3572 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3642 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3574,9 +3644,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3574 while (1) { 3644 while (1) {
3575 3645
3576 if (time_after_eq(jiffies, dev_init_timeout)) { 3646 if (time_after_eq(jiffies, dev_init_timeout)) {
3577 DEBUG(qla_printk(KERN_INFO, ha, 3647 ql_log(ql_log_fatal, vha, 0x009c,
3578 "%s: device init failed!\n", 3648 "Device init failed.\n");
3579 QLA2XXX_DRIVER_NAME));
3580 rval = QLA_FUNCTION_FAILED; 3649 rval = QLA_FUNCTION_FAILED;
3581 break; 3650 break;
3582 } 3651 }
@@ -3586,10 +3655,11 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3586 old_dev_state = dev_state; 3655 old_dev_state = dev_state;
3587 } 3656 }
3588 if (loopcount < 5) { 3657 if (loopcount < 5) {
3589 qla_printk(KERN_INFO, ha, 3658 ql_log(ql_log_info, vha, 0x009d,
3590 "2:Device state is 0x%x = %s\n", dev_state, 3659 "Device state is 0x%x = %s.\n",
3591 dev_state < MAX_STATES ? 3660 dev_state,
3592 qdev_state[dev_state] : "Unknown"); 3661 dev_state < MAX_STATES ? qdev_state[dev_state] :
3662 "Unknown");
3593 } 3663 }
3594 3664
3595 switch (dev_state) { 3665 switch (dev_state) {
@@ -3656,29 +3726,26 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3656 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3726 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3657 if (dev_state == QLA82XX_DEV_NEED_RESET && 3727 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3658 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3728 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3659 qla_printk(KERN_WARNING, ha, 3729 ql_log(ql_log_warn, vha, 0x6001,
3660 "scsi(%ld) %s: Adapter reset needed!\n", 3730 "Adapter reset needed.\n");
3661 vha->host_no, __func__);
3662 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3731 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3663 qla2xxx_wake_dpc(vha); 3732 qla2xxx_wake_dpc(vha);
3664 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3733 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3665 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3734 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3666 DEBUG(qla_printk(KERN_INFO, ha, 3735 ql_log(ql_log_warn, vha, 0x6002,
3667 "scsi(%ld) %s - detected quiescence needed\n", 3736 "Quiescent needed.\n");
3668 vha->host_no, __func__));
3669 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3737 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3670 qla2xxx_wake_dpc(vha); 3738 qla2xxx_wake_dpc(vha);
3671 } else { 3739 } else {
3672 if (qla82xx_check_fw_alive(vha)) { 3740 if (qla82xx_check_fw_alive(vha)) {
3673 halt_status = qla82xx_rd_32(ha, 3741 halt_status = qla82xx_rd_32(ha,
3674 QLA82XX_PEG_HALT_STATUS1); 3742 QLA82XX_PEG_HALT_STATUS1);
3675 qla_printk(KERN_INFO, ha, 3743 ql_dbg(ql_dbg_timer, vha, 0x6005,
3676 "scsi(%ld): %s, Dumping hw/fw registers:\n " 3744 "dumping hw/fw registers:.\n "
3677 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n " 3745 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
3678 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n " 3746 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
3679 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n " 3747 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
3680 " PEG_NET_4_PC: 0x%x\n", 3748 " PEG_NET_4_PC: 0x%x.\n", halt_status,
3681 vha->host_no, __func__, halt_status,
3682 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), 3749 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3683 qla82xx_rd_32(ha, 3750 qla82xx_rd_32(ha,
3684 QLA82XX_CRB_PEG_NET_0 + 0x3c), 3751 QLA82XX_CRB_PEG_NET_0 + 0x3c),
@@ -3694,9 +3761,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3694 set_bit(ISP_UNRECOVERABLE, 3761 set_bit(ISP_UNRECOVERABLE,
3695 &vha->dpc_flags); 3762 &vha->dpc_flags);
3696 } else { 3763 } else {
3697 qla_printk(KERN_INFO, ha, 3764 ql_log(ql_log_info, vha, 0x6006,
3698 "scsi(%ld): %s - detect abort needed\n", 3765 "Detect abort needed.\n");
3699 vha->host_no, __func__);
3700 set_bit(ISP_ABORT_NEEDED, 3766 set_bit(ISP_ABORT_NEEDED,
3701 &vha->dpc_flags); 3767 &vha->dpc_flags);
3702 } 3768 }
@@ -3704,10 +3770,10 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3704 ha->flags.isp82xx_fw_hung = 1; 3770 ha->flags.isp82xx_fw_hung = 1;
3705 if (ha->flags.mbox_busy) { 3771 if (ha->flags.mbox_busy) {
3706 ha->flags.mbox_int = 1; 3772 ha->flags.mbox_int = 1;
3707 DEBUG2(qla_printk(KERN_ERR, ha, 3773 ql_log(ql_log_warn, vha, 0x6007,
3708 "scsi(%ld) Due to fw hung, doing " 3774 "Due to FW hung, doing "
3709 "premature completion of mbx " 3775 "premature completion of mbx "
3710 "command\n", vha->host_no)); 3776 "command.\n");
3711 if (test_bit(MBX_INTR_WAIT, 3777 if (test_bit(MBX_INTR_WAIT,
3712 &ha->mbx_cmd_flags)) 3778 &ha->mbx_cmd_flags))
3713 complete(&ha->mbx_intr_comp); 3779 complete(&ha->mbx_intr_comp);
@@ -3742,9 +3808,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3742 uint32_t dev_state; 3808 uint32_t dev_state;
3743 3809
3744 if (vha->device_flags & DFLG_DEV_FAILED) { 3810 if (vha->device_flags & DFLG_DEV_FAILED) {
3745 qla_printk(KERN_WARNING, ha, 3811 ql_log(ql_log_warn, vha, 0x8024,
3746 "%s(%ld): Device in failed state, " 3812 "Device in failed state, exiting.\n");
3747 "Exiting.\n", __func__, vha->host_no);
3748 return QLA_SUCCESS; 3813 return QLA_SUCCESS;
3749 } 3814 }
3750 ha->flags.isp82xx_reset_hdlr_active = 1; 3815 ha->flags.isp82xx_reset_hdlr_active = 1;
@@ -3752,13 +3817,14 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3752 qla82xx_idc_lock(ha); 3817 qla82xx_idc_lock(ha);
3753 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3818 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3754 if (dev_state == QLA82XX_DEV_READY) { 3819 if (dev_state == QLA82XX_DEV_READY) {
3755 qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 3820 ql_log(ql_log_info, vha, 0x8025,
3821 "HW State: NEED RESET.\n");
3756 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3822 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3757 QLA82XX_DEV_NEED_RESET); 3823 QLA82XX_DEV_NEED_RESET);
3758 } else 3824 } else
3759 qla_printk(KERN_INFO, ha, "HW State: %s\n", 3825 ql_log(ql_log_info, vha, 0x8026,
3760 dev_state < MAX_STATES ? 3826 "Hw State: %s.\n", dev_state < MAX_STATES ?
3761 qdev_state[dev_state] : "Unknown"); 3827 qdev_state[dev_state] : "Unknown");
3762 qla82xx_idc_unlock(ha); 3828 qla82xx_idc_unlock(ha);
3763 3829
3764 rval = qla82xx_device_state_handler(vha); 3830 rval = qla82xx_device_state_handler(vha);
@@ -3777,9 +3843,9 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3777 vha->flags.online = 1; 3843 vha->flags.online = 1;
3778 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 3844 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3779 if (ha->isp_abort_cnt == 0) { 3845 if (ha->isp_abort_cnt == 0) {
3780 qla_printk(KERN_WARNING, ha, 3846 ql_log(ql_log_warn, vha, 0x8027,
3781 "ISP error recovery failed - " 3847 "ISP error recover failed - board "
3782 "board disabled\n"); 3848 "disabled.\n");
3783 /* 3849 /*
3784 * The next call disables the board 3850 * The next call disables the board
3785 * completely. 3851 * completely.
@@ -3791,16 +3857,16 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3791 rval = QLA_SUCCESS; 3857 rval = QLA_SUCCESS;
3792 } else { /* schedule another ISP abort */ 3858 } else { /* schedule another ISP abort */
3793 ha->isp_abort_cnt--; 3859 ha->isp_abort_cnt--;
3794 DEBUG(qla_printk(KERN_INFO, ha, 3860 ql_log(ql_log_warn, vha, 0x8036,
3795 "qla%ld: ISP abort - retry remaining %d\n", 3861 "ISP abort - retry remaining %d.\n",
3796 vha->host_no, ha->isp_abort_cnt)); 3862 ha->isp_abort_cnt);
3797 rval = QLA_FUNCTION_FAILED; 3863 rval = QLA_FUNCTION_FAILED;
3798 } 3864 }
3799 } else { 3865 } else {
3800 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3866 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3801 DEBUG(qla_printk(KERN_INFO, ha, 3867 ql_dbg(ql_dbg_taskm, vha, 0x8029,
3802 "(%ld): ISP error recovery - retrying (%d) " 3868 "ISP error recovery - retrying (%d) more times.\n",
3803 "more times\n", vha->host_no, ha->isp_abort_cnt)); 3869 ha->isp_abort_cnt);
3804 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3870 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3805 rval = QLA_FUNCTION_FAILED; 3871 rval = QLA_FUNCTION_FAILED;
3806 } 3872 }
@@ -3872,8 +3938,8 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3872 break; 3938 break;
3873 } 3939 }
3874 } 3940 }
3875 DEBUG2(printk(KERN_INFO 3941 ql_dbg(ql_dbg_p3p, vha, 0xb027,
3876 "%s status=%d\n", __func__, status)); 3942 "%s status=%d.\n", status);
3877 3943
3878 return status; 3944 return status;
3879} 3945}
@@ -3902,6 +3968,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3902 } 3968 }
3903 } 3969 }
3904 } 3970 }
3971 ql_dbg(ql_dbg_init, vha, 0x00b0,
3972 "Entered %s fw_hung=%d.\n",
3973 __func__, ha->flags.isp82xx_fw_hung);
3905 3974
3906 /* Abort all commands gracefully if fw NOT hung */ 3975 /* Abort all commands gracefully if fw NOT hung */
3907 if (!ha->flags.isp82xx_fw_hung) { 3976 if (!ha->flags.isp82xx_fw_hung) {
@@ -3922,13 +3991,13 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3922 spin_unlock_irqrestore( 3991 spin_unlock_irqrestore(
3923 &ha->hardware_lock, flags); 3992 &ha->hardware_lock, flags);
3924 if (ha->isp_ops->abort_command(sp)) { 3993 if (ha->isp_ops->abort_command(sp)) {
3925 qla_printk(KERN_INFO, ha, 3994 ql_log(ql_log_info, vha,
3926 "scsi(%ld): mbx abort command failed in %s\n", 3995 0x00b1,
3927 vha->host_no, __func__); 3996 "mbx abort failed.\n");
3928 } else { 3997 } else {
3929 qla_printk(KERN_INFO, ha, 3998 ql_log(ql_log_info, vha,
3930 "scsi(%ld): mbx abort command success in %s\n", 3999 0x00b2,
3931 vha->host_no, __func__); 4000 "mbx abort success.\n");
3932 } 4001 }
3933 spin_lock_irqsave(&ha->hardware_lock, flags); 4002 spin_lock_irqsave(&ha->hardware_lock, flags);
3934 } 4003 }
@@ -3940,8 +4009,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3940 /* Wait for pending cmds (physical and virtual) to complete */ 4009 /* Wait for pending cmds (physical and virtual) to complete */
3941 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 4010 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3942 WAIT_HOST) == QLA_SUCCESS) { 4011 WAIT_HOST) == QLA_SUCCESS) {
3943 DEBUG2(qla_printk(KERN_INFO, ha, 4012 ql_dbg(ql_dbg_init, vha, 0x00b3,
3944 "Done wait for pending commands\n")); 4013 "Done wait for "
4014 "pending commands.\n");
3945 } 4015 }
3946 } 4016 }
3947} 4017}
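Note: every hunk in this patch applies the same conversion: qla_printk(KERN_*, ha, ...) and the DEBUGn() wrappers become ql_log()/ql_dbg() calls that take a severity or debug-mask constant, the scsi_qla_host_t pointer, and a unique hexadecimal message id. A minimal sketch of the two call shapes follows; the signatures are inferred from the call sites in this patch (the real declarations live in the driver's qla_dbg.h/qla_def.h headers), and the ids/arguments are modeled on the hunks above.

    #include "qla_def.h"   /* driver-internal header; assumed to pull in the ql_dbg()/ql_log() declarations */

    /* Sketch only: not the driver's code, just the call convention. */
    static void example_logging(scsi_qla_host_t *vha, srb_t *sp, struct scsi_cmnd *cmd)
    {
            /* Debug-class message: emitted only when the matching ql_dbg_*
             * bit is enabled in the driver's debug mask. */
            ql_dbg(ql_dbg_taskm, vha, 0x8002,
                "Aborting sp=%p cmd=%p from RISC.\n", sp, cmd);

            /* Log-class message: intended for always-relevant events,
             * printed at the given severity rather than gated by the mask. */
            ql_log(ql_log_warn, vha, 0x8006,
                "Abort handler timed out for cmd=%p.\n", cmd);
    }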
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8aa05c87b653..e02df276804e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -141,8 +141,8 @@ MODULE_PARM_DESC(ql2xmultique_tag,
141int ql2xfwloadbin; 141int ql2xfwloadbin;
142module_param(ql2xfwloadbin, int, S_IRUGO); 142module_param(ql2xfwloadbin, int, S_IRUGO);
143MODULE_PARM_DESC(ql2xfwloadbin, 143MODULE_PARM_DESC(ql2xfwloadbin,
144 "Option to specify location from which to load ISP firmware:\n" 144 "Option to specify location from which to load ISP firmware:.\n"
145 " 2 -- load firmware via the request_firmware() (hotplug)\n" 145 " 2 -- load firmware via the request_firmware() (hotplug).\n"
146 " interface.\n" 146 " interface.\n"
147 " 1 -- load firmware from flash.\n" 147 " 1 -- load firmware from flash.\n"
148 " 0 -- use default semantics.\n"); 148 " 0 -- use default semantics.\n");
@@ -156,7 +156,7 @@ MODULE_PARM_DESC(ql2xetsenable,
156int ql2xdbwr = 1; 156int ql2xdbwr = 1;
157module_param(ql2xdbwr, int, S_IRUGO); 157module_param(ql2xdbwr, int, S_IRUGO);
158MODULE_PARM_DESC(ql2xdbwr, 158MODULE_PARM_DESC(ql2xdbwr,
159 "Option to specify scheme for request queue posting\n" 159 "Option to specify scheme for request queue posting.\n"
160 " 0 -- Regular doorbell.\n" 160 " 0 -- Regular doorbell.\n"
161 " 1 -- CAMRAM doorbell (faster).\n"); 161 " 1 -- CAMRAM doorbell (faster).\n");
162 162
@@ -181,7 +181,7 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
181int ql2xdontresethba; 181int ql2xdontresethba;
182module_param(ql2xdontresethba, int, S_IRUGO); 182module_param(ql2xdontresethba, int, S_IRUGO);
183MODULE_PARM_DESC(ql2xdontresethba, 183MODULE_PARM_DESC(ql2xdontresethba,
184 "Option to specify reset behaviour\n" 184 "Option to specify reset behaviour.\n"
185 " 0 (Default) -- Reset on failure.\n" 185 " 0 (Default) -- Reset on failure.\n"
186 " 1 -- Do not reset on failure.\n"); 186 " 1 -- Do not reset on failure.\n");
187 187
@@ -260,8 +260,11 @@ static inline void
260qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 260qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
261{ 261{
262 /* Currently used for 82XX only. */ 262 /* Currently used for 82XX only. */
263 if (vha->device_flags & DFLG_DEV_FAILED) 263 if (vha->device_flags & DFLG_DEV_FAILED) {
264 ql_dbg(ql_dbg_timer, vha, 0x600d,
265 "Device in a failed state, returning.\n");
264 return; 266 return;
267 }
265 268
266 mod_timer(&vha->timer, jiffies + interval * HZ); 269 mod_timer(&vha->timer, jiffies + interval * HZ);
267} 270}
@@ -286,19 +289,20 @@ static void qla2x00_sp_free_dma(srb_t *);
286/* -------------------------------------------------------------------------- */ 289/* -------------------------------------------------------------------------- */
287static int qla2x00_alloc_queues(struct qla_hw_data *ha) 290static int qla2x00_alloc_queues(struct qla_hw_data *ha)
288{ 291{
292 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
289 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, 293 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
290 GFP_KERNEL); 294 GFP_KERNEL);
291 if (!ha->req_q_map) { 295 if (!ha->req_q_map) {
292 qla_printk(KERN_WARNING, ha, 296 ql_log(ql_log_fatal, vha, 0x003b,
293 "Unable to allocate memory for request queue ptrs\n"); 297 "Unable to allocate memory for request queue ptrs.\n");
294 goto fail_req_map; 298 goto fail_req_map;
295 } 299 }
296 300
297 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, 301 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
298 GFP_KERNEL); 302 GFP_KERNEL);
299 if (!ha->rsp_q_map) { 303 if (!ha->rsp_q_map) {
300 qla_printk(KERN_WARNING, ha, 304 ql_log(ql_log_fatal, vha, 0x003c,
301 "Unable to allocate memory for response queue ptrs\n"); 305 "Unable to allocate memory for response queue ptrs.\n");
302 goto fail_rsp_map; 306 goto fail_rsp_map;
303 } 307 }
304 set_bit(0, ha->rsp_qid_map); 308 set_bit(0, ha->rsp_qid_map);
@@ -362,8 +366,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
362 struct qla_hw_data *ha = vha->hw; 366 struct qla_hw_data *ha = vha->hw;
363 367
364 if (!(ha->fw_attributes & BIT_6)) { 368 if (!(ha->fw_attributes & BIT_6)) {
365 qla_printk(KERN_INFO, ha, 369 ql_log(ql_log_warn, vha, 0x00d8,
366 "Firmware is not multi-queue capable\n"); 370 "Firmware is not multi-queue capable.\n");
367 goto fail; 371 goto fail;
368 } 372 }
369 if (ql2xmultique_tag) { 373 if (ql2xmultique_tag) {
@@ -372,8 +376,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
372 req = qla25xx_create_req_que(ha, options, 0, 0, -1, 376 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
373 QLA_DEFAULT_QUE_QOS); 377 QLA_DEFAULT_QUE_QOS);
374 if (!req) { 378 if (!req) {
375 qla_printk(KERN_WARNING, ha, 379 ql_log(ql_log_warn, vha, 0x00e0,
376 "Can't create request queue\n"); 380 "Failed to create request queue.\n");
377 goto fail; 381 goto fail;
378 } 382 }
379 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); 383 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
@@ -382,17 +386,20 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
382 for (ques = 1; ques < ha->max_rsp_queues; ques++) { 386 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
383 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req); 387 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
384 if (!ret) { 388 if (!ret) {
385 qla_printk(KERN_WARNING, ha, 389 ql_log(ql_log_warn, vha, 0x00e8,
386 "Response Queue create failed\n"); 390 "Failed to create response queue.\n");
387 goto fail2; 391 goto fail2;
388 } 392 }
389 } 393 }
390 ha->flags.cpu_affinity_enabled = 1; 394 ha->flags.cpu_affinity_enabled = 1;
391 395 ql_dbg(ql_dbg_multiq, vha, 0xc007,
392 DEBUG2(qla_printk(KERN_INFO, ha, 396 "CPU affinity mode enabled, "
393 "CPU affinity mode enabled, no. of response" 397 "no. of response queues:%d no. of request queues:%d.\n",
394 " queues:%d, no. of request queues:%d\n", 398 ha->max_rsp_queues, ha->max_req_queues);
395 ha->max_rsp_queues, ha->max_req_queues)); 399 ql_dbg(ql_dbg_init, vha, 0x00e9,
400 "CPU affinity mode enalbed, "
401 "no. of response queues:%d no. of request queues:%d.\n",
402 ha->max_rsp_queues, ha->max_req_queues);
396 } 403 }
397 return 0; 404 return 0;
398fail2: 405fail2:
@@ -539,8 +546,11 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
539 struct qla_hw_data *ha = vha->hw; 546 struct qla_hw_data *ha = vha->hw;
540 547
541 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 548 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
542 if (!sp) 549 if (!sp) {
550 ql_log(ql_log_warn, vha, 0x3006,
551 "Memory allocation failed for sp.\n");
543 return sp; 552 return sp;
553 }
544 554
545 atomic_set(&sp->ref_count, 1); 555 atomic_set(&sp->ref_count, 1);
546 sp->fcport = fcport; 556 sp->fcport = fcport;
@@ -564,30 +574,43 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
564 int rval; 574 int rval;
565 575
566 if (ha->flags.eeh_busy) { 576 if (ha->flags.eeh_busy) {
567 if (ha->flags.pci_channel_io_perm_failure) 577 if (ha->flags.pci_channel_io_perm_failure) {
578 ql_dbg(ql_dbg_io, vha, 0x3001,
579 "PCI Channel IO permanent failure, exiting "
580 "cmd=%p.\n", cmd);
568 cmd->result = DID_NO_CONNECT << 16; 581 cmd->result = DID_NO_CONNECT << 16;
569 else 582 } else {
583 ql_dbg(ql_dbg_io, vha, 0x3002,
584 "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
570 cmd->result = DID_REQUEUE << 16; 585 cmd->result = DID_REQUEUE << 16;
586 }
571 goto qc24_fail_command; 587 goto qc24_fail_command;
572 } 588 }
573 589
574 rval = fc_remote_port_chkready(rport); 590 rval = fc_remote_port_chkready(rport);
575 if (rval) { 591 if (rval) {
576 cmd->result = rval; 592 cmd->result = rval;
593 ql_dbg(ql_dbg_io, vha, 0x3003,
594 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
595 cmd, rval);
577 goto qc24_fail_command; 596 goto qc24_fail_command;
578 } 597 }
579 598
580 if (!vha->flags.difdix_supported && 599 if (!vha->flags.difdix_supported &&
581 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 600 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
582 DEBUG2(qla_printk(KERN_ERR, ha, 601 ql_dbg(ql_dbg_io, vha, 0x3004,
583 "DIF Cap Not Reg, fail DIF capable cmd's:%x\n", 602 "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
584 cmd->cmnd[0])); 603 cmd);
585 cmd->result = DID_NO_CONNECT << 16; 604 cmd->result = DID_NO_CONNECT << 16;
586 goto qc24_fail_command; 605 goto qc24_fail_command;
587 } 606 }
588 if (atomic_read(&fcport->state) != FCS_ONLINE) { 607 if (atomic_read(&fcport->state) != FCS_ONLINE) {
589 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 608 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
590 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 609 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
610 ql_dbg(ql_dbg_io, vha, 0x3005,
611 "Returning DNC, fcport_state=%d loop_state=%d.\n",
612 atomic_read(&fcport->state),
613 atomic_read(&base_vha->loop_state));
591 cmd->result = DID_NO_CONNECT << 16; 614 cmd->result = DID_NO_CONNECT << 16;
592 goto qc24_fail_command; 615 goto qc24_fail_command;
593 } 616 }
@@ -599,8 +622,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
599 goto qc24_host_busy; 622 goto qc24_host_busy;
600 623
601 rval = ha->isp_ops->start_scsi(sp); 624 rval = ha->isp_ops->start_scsi(sp);
602 if (rval != QLA_SUCCESS) 625 if (rval != QLA_SUCCESS) {
626 ql_dbg(ql_dbg_io, vha, 0x3013,
627 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
603 goto qc24_host_busy_free_sp; 628 goto qc24_host_busy_free_sp;
629 }
604 630
605 return 0; 631 return 0;
606 632
@@ -643,7 +669,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
643 int ret = QLA_SUCCESS; 669 int ret = QLA_SUCCESS;
644 670
645 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { 671 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
646 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n")); 672 ql_dbg(ql_dbg_taskm, vha, 0x8005,
673 "Return:eh_wait.\n");
647 return ret; 674 return ret;
648 } 675 }
649 676
@@ -736,7 +763,8 @@ qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
736 else 763 else
737 return_status = QLA_FUNCTION_FAILED; 764 return_status = QLA_FUNCTION_FAILED;
738 765
739 DEBUG2(printk("%s return_status=%d\n", __func__, return_status)); 766 ql_dbg(ql_dbg_taskm, vha, 0x8019,
767 "%s return status=%d.\n", __func__, return_status);
740 768
741 return return_status; 769 return return_status;
742} 770}
@@ -844,10 +872,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
844 int wait = 0; 872 int wait = 0;
845 struct qla_hw_data *ha = vha->hw; 873 struct qla_hw_data *ha = vha->hw;
846 874
875 ql_dbg(ql_dbg_taskm, vha, 0x8000,
876 "Entered %s for cmd=%p.\n", __func__, cmd);
847 if (!CMD_SP(cmd)) 877 if (!CMD_SP(cmd))
848 return SUCCESS; 878 return SUCCESS;
849 879
850 ret = fc_block_scsi_eh(cmd); 880 ret = fc_block_scsi_eh(cmd);
881 ql_dbg(ql_dbg_taskm, vha, 0x8001,
882 "Return value of fc_block_scsi_eh=%d.\n", ret);
851 if (ret != 0) 883 if (ret != 0)
852 return ret; 884 return ret;
853 ret = SUCCESS; 885 ret = SUCCESS;
@@ -862,20 +894,19 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
862 return SUCCESS; 894 return SUCCESS;
863 } 895 }
864 896
865 DEBUG2(printk("%s(%ld): aborting sp %p from RISC.", 897 ql_dbg(ql_dbg_taskm, vha, 0x8002,
866 __func__, vha->host_no, sp)); 898 "Aborting sp=%p cmd=%p from RISC.\n", sp, cmd);
867 899
868 /* Get a reference to the sp and drop the lock.*/ 900 /* Get a reference to the sp and drop the lock.*/
869 sp_get(sp); 901 sp_get(sp);
870 902
871 spin_unlock_irqrestore(&ha->hardware_lock, flags); 903 spin_unlock_irqrestore(&ha->hardware_lock, flags);
872 if (ha->isp_ops->abort_command(sp)) { 904 if (ha->isp_ops->abort_command(sp)) {
873 DEBUG2(printk("%s(%ld): abort_command " 905 ql_dbg(ql_dbg_taskm, vha, 0x8003,
874 "mbx failed.\n", __func__, vha->host_no)); 906 "Abort command mbx failed for cmd=%p.\n", cmd);
875 ret = FAILED;
876 } else { 907 } else {
877 DEBUG3(printk("%s(%ld): abort_command " 908 ql_dbg(ql_dbg_taskm, vha, 0x8004,
878 "mbx success.\n", __func__, vha->host_no)); 909 "Abort command mbx success.\n");
879 wait = 1; 910 wait = 1;
880 } 911 }
881 qla2x00_sp_compl(ha, sp); 912 qla2x00_sp_compl(ha, sp);
@@ -883,16 +914,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
883 /* Wait for the command to be returned. */ 914 /* Wait for the command to be returned. */
884 if (wait) { 915 if (wait) {
885 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { 916 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
886 qla_printk(KERN_ERR, ha, 917 ql_log(ql_log_warn, vha, 0x8006,
887 "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n", 918 "Abort handler timed out for cmd=%p.\n", cmd);
888 vha->host_no, id, lun, ret);
889 ret = FAILED; 919 ret = FAILED;
890 } 920 }
891 } 921 }
892 922
893 qla_printk(KERN_INFO, ha, 923 ql_log(ql_log_info, vha, 0x801c,
894 "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n", 924 "Abort command issued -- %d %x.\n", wait, ret);
895 vha->host_no, id, lun, wait, ret);
896 925
897 return ret; 926 return ret;
898} 927}
@@ -960,40 +989,59 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
960 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 989 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
961 int err; 990 int err;
962 991
963 if (!fcport) 992 if (!fcport) {
993 ql_log(ql_log_warn, vha, 0x8007,
994 "fcport is NULL.\n");
964 return FAILED; 995 return FAILED;
996 }
965 997
966 err = fc_block_scsi_eh(cmd); 998 err = fc_block_scsi_eh(cmd);
999 ql_dbg(ql_dbg_taskm, vha, 0x8008,
1000 "fc_block_scsi_eh ret=%d.\n", err);
967 if (err != 0) 1001 if (err != 0)
968 return err; 1002 return err;
969 1003
970 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 1004 ql_log(ql_log_info, vha, 0x8009,
971 vha->host_no, cmd->device->id, cmd->device->lun, name); 1005 "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
1006 cmd->device->id, cmd->device->lun, cmd);
972 1007
973 err = 0; 1008 err = 0;
974 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1009 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1010 ql_log(ql_log_warn, vha, 0x800a,
1011 "Wait for hba online failed for cmd=%p.\n", cmd);
975 goto eh_reset_failed; 1012 goto eh_reset_failed;
1013 }
976 err = 1; 1014 err = 1;
977 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 1015 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
1016 ql_log(ql_log_warn, vha, 0x800b,
1017 "Wait for loop ready failed for cmd=%p.\n", cmd);
978 goto eh_reset_failed; 1018 goto eh_reset_failed;
1019 }
979 err = 2; 1020 err = 2;
980 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) 1021 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
981 != QLA_SUCCESS) 1022 != QLA_SUCCESS) {
1023 ql_log(ql_log_warn, vha, 0x800c,
1024 "do_reset failed for cmd=%p.\n", cmd);
982 goto eh_reset_failed; 1025 goto eh_reset_failed;
1026 }
983 err = 3; 1027 err = 3;
984 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 1028 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
985 cmd->device->lun, type) != QLA_SUCCESS) 1029 cmd->device->lun, type) != QLA_SUCCESS) {
1030 ql_log(ql_log_warn, vha, 0x800d,
1031 "wait for peding cmds failed for cmd=%p.\n", cmd);
986 goto eh_reset_failed; 1032 goto eh_reset_failed;
1033 }
987 1034
988 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 1035 ql_log(ql_log_info, vha, 0x800e,
989 vha->host_no, cmd->device->id, cmd->device->lun, name); 1036 "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
1037 cmd->device->id, cmd->device->lun, cmd);
990 1038
991 return SUCCESS; 1039 return SUCCESS;
992 1040
993eh_reset_failed: 1041eh_reset_failed:
994 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n" 1042 ql_log(ql_log_info, vha, 0x800f,
995 , vha->host_no, cmd->device->id, cmd->device->lun, name, 1043 "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
996 reset_errors[err]); 1044 reset_errors[err], cmd->device->id, cmd->device->lun, cmd);
997 return FAILED; 1045 return FAILED;
998} 1046}
999 1047
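Note: __qla2xxx_eh_generic_reset() above advances err from 0 to 3 before each stage so that the failure path can index reset_errors[] and report which stage did not complete. Below is a hedged sketch of that bookkeeping; the stage predicates and the strings in the table are illustrative placeholders, not the driver's actual symbols.

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/printk.h>

    /* Hypothetical stage predicates standing in for the wait/reset calls above. */
    static bool hba_online(void), loop_ready(void), reset_sent(void), cmds_drained(void);

    static const char * const reset_errors[] = {        /* illustrative strings */
            "HBA not online", "loop not ready",
            "reset mailbox failed", "pending commands did not drain",
    };

    static int staged_reset(void)
    {
            int err = 0;                    /* index of the stage about to run */

            if (!hba_online())
                    goto failed;
            err = 1;
            if (!loop_ready())
                    goto failed;
            err = 2;
            if (!reset_sent())
                    goto failed;
            err = 3;
            if (!cmds_drained())
                    goto failed;
            return 0;

    failed:
            pr_warn("RESET FAILED: %s\n", reset_errors[err]);
            return -EIO;
    }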
@@ -1043,19 +1091,25 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1043 id = cmd->device->id; 1091 id = cmd->device->id;
1044 lun = cmd->device->lun; 1092 lun = cmd->device->lun;
1045 1093
1046 if (!fcport) 1094 if (!fcport) {
1095 ql_log(ql_log_warn, vha, 0x8010,
1096 "fcport is NULL.\n");
1047 return ret; 1097 return ret;
1098 }
1048 1099
1049 ret = fc_block_scsi_eh(cmd); 1100 ret = fc_block_scsi_eh(cmd);
1101 ql_dbg(ql_dbg_taskm, vha, 0x8011,
1102 "fc_block_scsi_eh ret=%d.\n", ret);
1050 if (ret != 0) 1103 if (ret != 0)
1051 return ret; 1104 return ret;
1052 ret = FAILED; 1105 ret = FAILED;
1053 1106
1054 qla_printk(KERN_INFO, vha->hw, 1107 ql_log(ql_log_info, vha, 0x8012,
1055 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun); 1108 "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
1056 1109
1057 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1110 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1058 DEBUG2(printk("%s failed:board disabled\n",__func__)); 1111 ql_log(ql_log_fatal, vha, 0x8013,
1112 "Wait for hba online failed board disabled.\n");
1059 goto eh_bus_reset_done; 1113 goto eh_bus_reset_done;
1060 } 1114 }
1061 1115
@@ -1068,12 +1122,15 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1068 1122
1069 /* Flush outstanding commands. */ 1123 /* Flush outstanding commands. */
1070 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != 1124 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1071 QLA_SUCCESS) 1125 QLA_SUCCESS) {
1126 ql_log(ql_log_warn, vha, 0x8014,
1127 "Wait for pending commands failed.\n");
1072 ret = FAILED; 1128 ret = FAILED;
1129 }
1073 1130
1074eh_bus_reset_done: 1131eh_bus_reset_done:
1075 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__, 1132 ql_log(ql_log_warn, vha, 0x802b,
1076 (ret == FAILED) ? "failed" : "succeeded"); 1133 "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
1077 1134
1078 return ret; 1135 return ret;
1079} 1136}
@@ -1106,16 +1163,21 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1106 id = cmd->device->id; 1163 id = cmd->device->id;
1107 lun = cmd->device->lun; 1164 lun = cmd->device->lun;
1108 1165
1109 if (!fcport) 1166 if (!fcport) {
1167 ql_log(ql_log_warn, vha, 0x8016,
1168 "fcport is NULL.\n");
1110 return ret; 1169 return ret;
1170 }
1111 1171
1112 ret = fc_block_scsi_eh(cmd); 1172 ret = fc_block_scsi_eh(cmd);
1173 ql_dbg(ql_dbg_taskm, vha, 0x8017,
1174 "fc_block_scsi_eh ret=%d.\n", ret);
1113 if (ret != 0) 1175 if (ret != 0)
1114 return ret; 1176 return ret;
1115 ret = FAILED; 1177 ret = FAILED;
1116 1178
1117 qla_printk(KERN_INFO, ha, 1179 ql_log(ql_log_info, vha, 0x8018,
1118 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun); 1180 "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
1119 1181
1120 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) 1182 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
1121 goto eh_host_reset_lock; 1183 goto eh_host_reset_lock;
@@ -1150,8 +1212,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1150 /* failed. schedule dpc to try */ 1212 /* failed. schedule dpc to try */
1151 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 1213 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1152 1214
1153 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1215 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1216 ql_log(ql_log_warn, vha, 0x802a,
1217 "wait for hba online failed.\n");
1154 goto eh_host_reset_lock; 1218 goto eh_host_reset_lock;
1219 }
1155 } 1220 }
1156 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1221 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1157 } 1222 }
@@ -1162,7 +1227,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1162 ret = SUCCESS; 1227 ret = SUCCESS;
1163 1228
1164eh_host_reset_lock: 1229eh_host_reset_lock:
1165 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1230 qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
1166 (ret == FAILED) ? "failed" : "succeeded"); 1231 (ret == FAILED) ? "failed" : "succeeded");
1167 1232
1168 return ret; 1233 return ret;
@@ -1192,9 +1257,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1192 1257
1193 ret = ha->isp_ops->target_reset(fcport, 0, 0); 1258 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1194 if (ret != QLA_SUCCESS) { 1259 if (ret != QLA_SUCCESS) {
1195 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1260 ql_dbg(ql_dbg_taskm, vha, 0x802c,
1196 "target_reset=%d d_id=%x.\n", __func__, 1261 "Bus Reset failed: Target Reset=%d "
1197 vha->host_no, ret, fcport->d_id.b24)); 1262 "d_id=%x.\n", ret, fcport->d_id.b24);
1198 } 1263 }
1199 } 1264 }
1200 } 1265 }
@@ -1202,9 +1267,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1202 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) { 1267 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
1203 ret = qla2x00_full_login_lip(vha); 1268 ret = qla2x00_full_login_lip(vha);
1204 if (ret != QLA_SUCCESS) { 1269 if (ret != QLA_SUCCESS) {
1205 DEBUG2_3(printk("%s(%ld): failed: " 1270 ql_dbg(ql_dbg_taskm, vha, 0x802d,
1206 "full_login_lip=%d.\n", __func__, vha->host_no, 1271 "full_login_lip=%d.\n", ret);
1207 ret));
1208 } 1272 }
1209 atomic_set(&vha->loop_state, LOOP_DOWN); 1273 atomic_set(&vha->loop_state, LOOP_DOWN);
1210 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1274 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1215,8 +1279,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1215 if (ha->flags.enable_lip_reset) { 1279 if (ha->flags.enable_lip_reset) {
1216 ret = qla2x00_lip_reset(vha); 1280 ret = qla2x00_lip_reset(vha);
1217 if (ret != QLA_SUCCESS) { 1281 if (ret != QLA_SUCCESS) {
1218 DEBUG2_3(printk("%s(%ld): failed: " 1282 ql_dbg(ql_dbg_taskm, vha, 0x802e,
1219 "lip_reset=%d.\n", __func__, vha->host_no, ret)); 1283 "lip_reset failed (%d).\n", ret);
1220 } else 1284 } else
1221 qla2x00_wait_for_loop_ready(vha); 1285 qla2x00_wait_for_loop_ready(vha);
1222 } 1286 }
@@ -1315,17 +1379,17 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1315 if (!scsi_track_queue_full(sdev, qdepth)) 1379 if (!scsi_track_queue_full(sdev, qdepth))
1316 return; 1380 return;
1317 1381
1318 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw, 1382 ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
1319 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", 1383 "Queue depth adjusted-down "
1320 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1384 "to %d for scsi(%ld:%d:%d:%d).\n",
1321 sdev->queue_depth)); 1385 sdev->queue_depth, fcport->vha->host_no,
1386 sdev->channel, sdev->id, sdev->lun);
1322} 1387}
1323 1388
1324static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) 1389static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1325{ 1390{
1326 fc_port_t *fcport = sdev->hostdata; 1391 fc_port_t *fcport = sdev->hostdata;
1327 struct scsi_qla_host *vha = fcport->vha; 1392 struct scsi_qla_host *vha = fcport->vha;
1328 struct qla_hw_data *ha = vha->hw;
1329 struct req_que *req = NULL; 1393 struct req_que *req = NULL;
1330 1394
1331 req = vha->req; 1395 req = vha->req;
@@ -1340,10 +1404,11 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1340 else 1404 else
1341 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); 1405 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1342 1406
1343 DEBUG2(qla_printk(KERN_INFO, ha, 1407 ql_dbg(ql_dbg_io, vha, 0x302a,
1344 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 1408 "Queue depth adjusted-up to %d for "
1345 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1409 "scsi(%ld:%d:%d:%d).\n",
1346 sdev->queue_depth)); 1410 sdev->queue_depth, fcport->vha->host_no,
1411 sdev->channel, sdev->id, sdev->lun);
1347} 1412}
1348 1413
1349static int 1414static int
@@ -1789,6 +1854,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1789 ha->flags.port0 = 1; 1854 ha->flags.port0 = 1;
1790 else 1855 else
1791 ha->flags.port0 = 0; 1856 ha->flags.port0 = 0;
1857 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
1858 "device_type=0x%x port=%d fw_srisc_address=%p.\n",
1859 ha->device_type, ha->flags.port0, ha->fw_srisc_address);
1792} 1860}
1793 1861
1794static int 1862static int
@@ -1803,10 +1871,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1803 1871
1804 if (pci_request_selected_regions(ha->pdev, ha->bars, 1872 if (pci_request_selected_regions(ha->pdev, ha->bars,
1805 QLA2XXX_DRIVER_NAME)) { 1873 QLA2XXX_DRIVER_NAME)) {
1806 qla_printk(KERN_WARNING, ha, 1874 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
1807 "Failed to reserve PIO/MMIO regions (%s)\n", 1875 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
1808 pci_name(ha->pdev)); 1876 pci_name(ha->pdev));
1809
1810 goto iospace_error_exit; 1877 goto iospace_error_exit;
1811 } 1878 }
1812 if (!(ha->bars & 1)) 1879 if (!(ha->bars & 1))
@@ -1816,39 +1883,42 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1816 pio = pci_resource_start(ha->pdev, 0); 1883 pio = pci_resource_start(ha->pdev, 0);
1817 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1884 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1818 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1885 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1819 qla_printk(KERN_WARNING, ha, 1886 ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
1820 "Invalid PCI I/O region size (%s)...\n", 1887 "Invalid pci I/O region size (%s).\n",
1821 pci_name(ha->pdev)); 1888 pci_name(ha->pdev));
1822 pio = 0; 1889 pio = 0;
1823 } 1890 }
1824 } else { 1891 } else {
1825 qla_printk(KERN_WARNING, ha, 1892 ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
1826 "region #0 not a PIO resource (%s)...\n", 1893 "Region #0 no a PIO resource (%s).\n",
1827 pci_name(ha->pdev)); 1894 pci_name(ha->pdev));
1828 pio = 0; 1895 pio = 0;
1829 } 1896 }
1830 ha->pio_address = pio; 1897 ha->pio_address = pio;
1898 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
1899 "PIO address=%p.\n",
1900 ha->pio_address);
1831 1901
1832skip_pio: 1902skip_pio:
1833 /* Use MMIO operations for all accesses. */ 1903 /* Use MMIO operations for all accesses. */
1834 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1904 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1835 qla_printk(KERN_ERR, ha, 1905 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
1836 "region #1 not an MMIO resource (%s), aborting\n", 1906 "Region #1 not an MMIO resource (%s), aborting.\n",
1837 pci_name(ha->pdev)); 1907 pci_name(ha->pdev));
1838 goto iospace_error_exit; 1908 goto iospace_error_exit;
1839 } 1909 }
1840 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1910 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1841 qla_printk(KERN_ERR, ha, 1911 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
1842 "Invalid PCI mem region size (%s), aborting\n", 1912 "Invalid PCI mem region size (%s), aborting.\n",
1843 pci_name(ha->pdev)); 1913 pci_name(ha->pdev));
1844 goto iospace_error_exit; 1914 goto iospace_error_exit;
1845 } 1915 }
1846 1916
1847 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1917 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1848 if (!ha->iobase) { 1918 if (!ha->iobase) {
1849 qla_printk(KERN_ERR, ha, 1919 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
1850 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); 1920 "Cannot remap MMIO (%s), aborting.\n",
1851 1921 pci_name(ha->pdev));
1852 goto iospace_error_exit; 1922 goto iospace_error_exit;
1853 } 1923 }
1854 1924
@@ -1862,6 +1932,8 @@ skip_pio:
1862 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1932 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1863 pci_resource_len(ha->pdev, 3)); 1933 pci_resource_len(ha->pdev, 3));
1864 if (ha->mqiobase) { 1934 if (ha->mqiobase) {
1935 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
1936 "MQIO Base=%p.\n", ha->mqiobase);
1865 /* Read MSIX vector size of the board */ 1937 /* Read MSIX vector size of the board */
1866 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 1938 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1867 ha->msix_count = msix; 1939 ha->msix_count = msix;
@@ -1874,17 +1946,24 @@ skip_pio:
1874 ha->max_req_queues = 2; 1946 ha->max_req_queues = 2;
1875 } else if (ql2xmaxqueues > 1) { 1947 } else if (ql2xmaxqueues > 1) {
1876 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 1948 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1877 QLA_MQ_SIZE : ql2xmaxqueues; 1949 QLA_MQ_SIZE : ql2xmaxqueues;
1878 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no" 1950 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
1879 " of request queues:%d\n", ha->max_req_queues)); 1951 "QoS mode set, max no of request queues:%d.\n",
1952 ha->max_req_queues);
1953 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
1954 "QoS mode set, max no of request queues:%d.\n",
1955 ha->max_req_queues);
1880 } 1956 }
1881 qla_printk(KERN_INFO, ha, 1957 ql_log_pci(ql_log_info, ha->pdev, 0x001a,
1882 "MSI-X vector count: %d\n", msix); 1958 "MSI-X vector count: %d.\n", msix);
1883 } else 1959 } else
1884 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n"); 1960 ql_log_pci(ql_log_info, ha->pdev, 0x001b,
1961 "BAR 3 not enabled.\n");
1885 1962
1886mqiobase_exit: 1963mqiobase_exit:
1887 ha->msix_count = ha->max_rsp_queues + 1; 1964 ha->msix_count = ha->max_rsp_queues + 1;
1965 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
1966 "MSIX Count:%d.\n", ha->msix_count);
1888 return (0); 1967 return (0);
1889 1968
1890iospace_error_exit: 1969iospace_error_exit:
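Note: in the PCI probe and I/O-space setup paths above there is no scsi_qla_host_t yet, so the patch uses the pci_dev-keyed variants ql_log_pci()/ql_dbg_pci(). A small sketch of that form, again inferred from the call sites in this hunk rather than quoted from the driver headers:

    #include <linux/pci.h>
    #include "qla_def.h"   /* assumed to declare ql_log_pci()/ql_dbg_pci() */

    /* Sketch only: ids and messages modeled on the hunk above. */
    static void example_pci_logging(struct pci_dev *pdev, int msix)
    {
            /* Fatal early-probe condition: key the message off the pci_dev. */
            ql_log_pci(ql_log_fatal, pdev, 0x0015,
                "Region #1 not an MMIO resource (%s), aborting.\n", pci_name(pdev));

            /* Init-time detail at debug verbosity, same pdev-based form. */
            ql_dbg_pci(ql_dbg_init, pdev, 0x001c, "MSIX Count:%d.\n", msix);
    }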
@@ -1948,7 +2027,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1948 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) { 2027 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
1949 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2028 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1950 mem_only = 1; 2029 mem_only = 1;
2030 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
2031 "Mem only adapter.\n");
1951 } 2032 }
2033 ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
2034 "Bars=%d.\n", bars);
1952 2035
1953 if (mem_only) { 2036 if (mem_only) {
1954 if (pci_enable_device_mem(pdev)) 2037 if (pci_enable_device_mem(pdev))
@@ -1963,9 +2046,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1963 2046
1964 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2047 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1965 if (!ha) { 2048 if (!ha) {
1966 DEBUG(printk("Unable to allocate memory for ha\n")); 2049 ql_log_pci(ql_log_fatal, pdev, 0x0009,
2050 "Unable to allocate memory for ha.\n");
1967 goto probe_out; 2051 goto probe_out;
1968 } 2052 }
2053 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2054 "Memory allocated for ha=%p.\n", ha);
1969 ha->pdev = pdev; 2055 ha->pdev = pdev;
1970 2056
1971 /* Clear our data area */ 2057 /* Clear our data area */
@@ -1987,10 +2073,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1987 if (ret) 2073 if (ret)
1988 goto probe_hw_failed; 2074 goto probe_hw_failed;
1989 2075
1990 qla_printk(KERN_INFO, ha, 2076 ql_log_pci(ql_log_info, pdev, 0x001d,
1991 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 2077 "Found an ISP%04X irq %d iobase 0x%p.\n",
1992 ha->iobase); 2078 pdev->device, pdev->irq, ha->iobase);
1993
1994 ha->prev_topology = 0; 2079 ha->prev_topology = 0;
1995 ha->init_cb_size = sizeof(init_cb_t); 2080 ha->init_cb_size = sizeof(init_cb_t);
1996 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2081 ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2091,7 +2176,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2091 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2176 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2092 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2177 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2093 } 2178 }
2094 2179 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
2180 "mbx_count=%d, req_length=%d, "
2181 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
2182 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
2183 ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
2184 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
2185 ha->nvram_npiv_size);
2186 ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
2187 "isp_ops=%p, flash_conf_off=%d, "
2188 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
2189 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
2190 ha->nvram_conf_off, ha->nvram_data_off);
2095 mutex_init(&ha->vport_lock); 2191 mutex_init(&ha->vport_lock);
2096 init_completion(&ha->mbx_cmd_comp); 2192 init_completion(&ha->mbx_cmd_comp);
2097 complete(&ha->mbx_cmd_comp); 2193 complete(&ha->mbx_cmd_comp);
@@ -2101,10 +2197,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2101 set_bit(0, (unsigned long *) ha->vp_idx_map); 2197 set_bit(0, (unsigned long *) ha->vp_idx_map);
2102 2198
2103 qla2x00_config_dma_addressing(ha); 2199 qla2x00_config_dma_addressing(ha);
2200 ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
2201 "64 Bit addressing is %s.\n",
2202 ha->flags.enable_64bit_addressing ? "enabled" :
2203 "disabled");
2104 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 2204 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2105 if (!ret) { 2205 if (!ret) {
2106 qla_printk(KERN_WARNING, ha, 2206 ql_log_pci(ql_log_fatal, pdev, 0x0031,
2107 "[ERROR] Failed to allocate memory for adapter\n"); 2207 "Failed to allocate memory for adapter, aborting.\n");
2108 2208
2109 goto probe_hw_failed; 2209 goto probe_hw_failed;
2110 } 2210 }
@@ -2116,9 +2216,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2116 2216
2117 base_vha = qla2x00_create_host(sht, ha); 2217 base_vha = qla2x00_create_host(sht, ha);
2118 if (!base_vha) { 2218 if (!base_vha) {
2119 qla_printk(KERN_WARNING, ha,
2120 "[ERROR] Failed to allocate memory for scsi_host\n");
2121
2122 ret = -ENOMEM; 2219 ret = -ENOMEM;
2123 qla2x00_mem_free(ha); 2220 qla2x00_mem_free(ha);
2124 qla2x00_free_req_que(ha, req); 2221 qla2x00_free_req_que(ha, req);
@@ -2145,7 +2242,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2145 if (!IS_QLA82XX(ha)) 2242 if (!IS_QLA82XX(ha))
2146 host->sg_tablesize = QLA_SG_ALL; 2243 host->sg_tablesize = QLA_SG_ALL;
2147 } 2244 }
2148 2245 ql_dbg(ql_dbg_init, base_vha, 0x0032,
2246 "can_queue=%d, req=%p, "
2247 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2248 host->can_queue, base_vha->req,
2249 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2149 host->max_id = max_id; 2250 host->max_id = max_id;
2150 host->this_id = 255; 2251 host->this_id = 255;
2151 host->cmd_per_lun = 3; 2252 host->cmd_per_lun = 3;
@@ -2159,6 +2260,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2159 host->transportt = qla2xxx_transport_template; 2260 host->transportt = qla2xxx_transport_template;
2160 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 2261 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2161 2262
2263 ql_dbg(ql_dbg_init, base_vha, 0x0033,
2264 "max_id=%d this_id=%d "
2265 "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
2266 "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
2267 host->this_id, host->cmd_per_lun, host->unique_id,
2268 host->max_cmd_len, host->max_channel, host->max_lun,
2269 host->transportt, sht->vendor_id);
2270
2162 /* Set up the irqs */ 2271 /* Set up the irqs */
2163 ret = qla2x00_request_irqs(ha, rsp); 2272 ret = qla2x00_request_irqs(ha, rsp);
2164 if (ret) 2273 if (ret)
@@ -2169,9 +2278,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2169 /* Alloc arrays of request and response ring ptrs */ 2278 /* Alloc arrays of request and response ring ptrs */
2170que_init: 2279que_init:
2171 if (!qla2x00_alloc_queues(ha)) { 2280 if (!qla2x00_alloc_queues(ha)) {
2172 qla_printk(KERN_WARNING, ha, 2281 ql_log(ql_log_fatal, base_vha, 0x003d,
2173 "[ERROR] Failed to allocate memory for queue" 2282 "Failed to allocate memory for queue pointers.. aborting.\n");
2174 " pointers\n");
2175 goto probe_init_failed; 2283 goto probe_init_failed;
2176 } 2284 }
2177 2285
@@ -2199,20 +2307,33 @@ que_init:
2199 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 2307 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2200 } 2308 }
2201 2309
2202 if (qla2x00_initialize_adapter(base_vha)) { 2310 ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
2203 qla_printk(KERN_WARNING, ha, 2311 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2204 "Failed to initialize adapter\n"); 2312 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2313 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
2314 "req->req_q_in=%p req->req_q_out=%p "
2315 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2316 req->req_q_in, req->req_q_out,
2317 rsp->rsp_q_in, rsp->rsp_q_out);
2318 ql_dbg(ql_dbg_init, base_vha, 0x003e,
2319 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2320 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2321 ql_dbg(ql_dbg_init, base_vha, 0x003f,
2322 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2323 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
2205 2324
2206 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 2325 if (qla2x00_initialize_adapter(base_vha)) {
2207 "Adapter flags %x.\n", 2326 ql_log(ql_log_fatal, base_vha, 0x00d6,
2208 base_vha->host_no, base_vha->device_flags)); 2327 "Failed to initialize adapter - Adapter flags %x.\n",
2328 base_vha->device_flags);
2209 2329
2210 if (IS_QLA82XX(ha)) { 2330 if (IS_QLA82XX(ha)) {
2211 qla82xx_idc_lock(ha); 2331 qla82xx_idc_lock(ha);
2212 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2332 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2213 QLA82XX_DEV_FAILED); 2333 QLA82XX_DEV_FAILED);
2214 qla82xx_idc_unlock(ha); 2334 qla82xx_idc_unlock(ha);
2215 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 2335 ql_log(ql_log_fatal, base_vha, 0x00d7,
2336 "HW State: FAILED.\n");
2216 } 2337 }
2217 2338
2218 ret = -ENODEV; 2339 ret = -ENODEV;
@@ -2221,9 +2342,8 @@ que_init:
2221 2342
2222 if (ha->mqenable) { 2343 if (ha->mqenable) {
2223 if (qla25xx_setup_mode(base_vha)) { 2344 if (qla25xx_setup_mode(base_vha)) {
2224 qla_printk(KERN_WARNING, ha, 2345 ql_log(ql_log_warn, base_vha, 0x00ec,
2225 "Can't create queues, falling back to single" 2346 "Failed to create queues, falling back to single queue mode.\n");
2226 " queue mode\n");
2227 goto que_init; 2347 goto que_init;
2228 } 2348 }
2229 } 2349 }
@@ -2235,13 +2355,15 @@ que_init:
2235 * Startup the kernel thread for this host adapter 2355 * Startup the kernel thread for this host adapter
2236 */ 2356 */
2237 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 2357 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2238 "%s_dpc", base_vha->host_str); 2358 "%s_dpc", base_vha->host_str);
2239 if (IS_ERR(ha->dpc_thread)) { 2359 if (IS_ERR(ha->dpc_thread)) {
2240 qla_printk(KERN_WARNING, ha, 2360 ql_log(ql_log_fatal, base_vha, 0x00ed,
2241 "Unable to start DPC thread!\n"); 2361 "Failed to start DPC thread.\n");
2242 ret = PTR_ERR(ha->dpc_thread); 2362 ret = PTR_ERR(ha->dpc_thread);
2243 goto probe_failed; 2363 goto probe_failed;
2244 } 2364 }
2365 ql_dbg(ql_dbg_init, base_vha, 0x00ee,
2366 "DPC thread started successfully.\n");
2245 2367
2246skip_dpc: 2368skip_dpc:
2247 list_add_tail(&base_vha->list, &ha->vp_list); 2369 list_add_tail(&base_vha->list, &ha->vp_list);
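Note: the DPC thread above is created with kthread_create(), which returns an ERR_PTR-encoded errno on failure and, on success, returns a task that is not yet running; a separate wake_up_process() is what actually starts it. A generic sketch of that idiom (standard kernel kthread API, not qla2xxx-specific; my_dpc, start_dpc and the "example" name are placeholders), with the sleep/wake loop mirroring the qla2x00_do_dpc() hunk near the end of this patch:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    static int my_dpc(void *data)                   /* placeholder thread function */
    {
            set_current_state(TASK_INTERRUPTIBLE);
            while (!kthread_should_stop()) {
                    schedule();                     /* sleep until woken */
                    __set_current_state(TASK_RUNNING);
                    /* ... handle deferred work here ... */
                    set_current_state(TASK_INTERRUPTIBLE);
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }

    static int start_dpc(void *ctx)
    {
            struct task_struct *tsk;

            tsk = kthread_create(my_dpc, ctx, "%s_dpc", "example");
            if (IS_ERR(tsk))
                    return PTR_ERR(tsk);            /* no thread was created */
            wake_up_process(tsk);                   /* kthread_create() leaves it stopped */
            return 0;
    }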
@@ -2249,16 +2371,18 @@ skip_dpc:
2249 2371
2250 /* Initialized the timer */ 2372 /* Initialized the timer */
2251 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL); 2373 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
2252 2374 ql_dbg(ql_dbg_init, base_vha, 0x00ef,
2253 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2375 "Started qla2x00_timer with "
2254 base_vha->host_no, ha)); 2376 "interval=%d.\n", WATCH_INTERVAL);
2377 ql_dbg(ql_dbg_init, base_vha, 0x00f0,
2378 "Detected hba at address=%p.\n",
2379 ha);
2255 2380
2256 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 2381 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
2257 if (ha->fw_attributes & BIT_4) { 2382 if (ha->fw_attributes & BIT_4) {
2258 base_vha->flags.difdix_supported = 1; 2383 base_vha->flags.difdix_supported = 1;
2259 DEBUG18(qla_printk(KERN_INFO, ha, 2384 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2260 "Registering for DIF/DIX type 1 and 3" 2385 "Registering for DIF/DIX type 1 and 3 protection.\n");
2261 " protection.\n"));
2262 scsi_host_set_prot(host, 2386 scsi_host_set_prot(host,
2263 SHOST_DIF_TYPE1_PROTECTION 2387 SHOST_DIF_TYPE1_PROTECTION
2264 | SHOST_DIF_TYPE2_PROTECTION 2388 | SHOST_DIF_TYPE2_PROTECTION
@@ -2280,6 +2404,9 @@ skip_dpc:
2280 base_vha->flags.init_done = 1; 2404 base_vha->flags.init_done = 1;
2281 base_vha->flags.online = 1; 2405 base_vha->flags.online = 1;
2282 2406
2407 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
2408 "Init done and hba is online.\n");
2409
2283 scsi_scan_host(host); 2410 scsi_scan_host(host);
2284 2411
2285 qla2x00_alloc_sysfs_attr(base_vha); 2412 qla2x00_alloc_sysfs_attr(base_vha);
@@ -2288,14 +2415,17 @@ skip_dpc:
2288 2415
2289 qla2x00_dfs_setup(base_vha); 2416 qla2x00_dfs_setup(base_vha);
2290 2417
2291 qla_printk(KERN_INFO, ha, "\n" 2418 ql_log(ql_log_info, base_vha, 0x00fa,
2292 " QLogic Fibre Channel HBA Driver: %s\n" 2419 "QLogic Fibre Channed HBA Driver: %s.\n",
2293 " QLogic %s - %s\n" 2420 qla2x00_version_str);
2294 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 2421 ql_log(ql_log_info, base_vha, 0x00fb,
2295 qla2x00_version_str, ha->model_number, 2422 "QLogic %s - %s.\n",
2296 ha->model_desc ? ha->model_desc : "", pdev->device, 2423 ha->model_number, ha->model_desc ? ha->model_desc : "");
2297 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev), 2424 ql_log(ql_log_info, base_vha, 0x00fc,
2298 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no, 2425 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
2426 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
2427 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
2428 base_vha->host_no,
2299 ha->isp_ops->fw_version_str(base_vha, fw_str)); 2429 ha->isp_ops->fw_version_str(base_vha, fw_str));
2300 2430
2301 return 0; 2431 return 0;
@@ -2593,20 +2723,15 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2593 fcport->login_retry = vha->hw->login_retry_count; 2723 fcport->login_retry = vha->hw->login_retry_count;
2594 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2724 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2595 2725
2596 DEBUG(printk("scsi(%ld): Port login retry: " 2726 ql_dbg(ql_dbg_disc, vha, 0x2067,
2727 "Port login retry "
2597 "%02x%02x%02x%02x%02x%02x%02x%02x, " 2728 "%02x%02x%02x%02x%02x%02x%02x%02x, "
2598 "id = 0x%04x retry cnt=%d\n", 2729 "id = 0x%04x retry cnt=%d.\n",
2599 vha->host_no, 2730 fcport->port_name[0], fcport->port_name[1],
2600 fcport->port_name[0], 2731 fcport->port_name[2], fcport->port_name[3],
2601 fcport->port_name[1], 2732 fcport->port_name[4], fcport->port_name[5],
2602 fcport->port_name[2], 2733 fcport->port_name[6], fcport->port_name[7],
2603 fcport->port_name[3], 2734 fcport->loop_id, fcport->login_retry);
2604 fcport->port_name[4],
2605 fcport->port_name[5],
2606 fcport->port_name[6],
2607 fcport->port_name[7],
2608 fcport->loop_id,
2609 fcport->login_retry));
2610 } 2735 }
2611} 2736}
2612 2737
@@ -2689,6 +2814,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2689 ctx_cachep); 2814 ctx_cachep);
2690 if (!ha->ctx_mempool) 2815 if (!ha->ctx_mempool)
2691 goto fail_free_srb_mempool; 2816 goto fail_free_srb_mempool;
2817 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
2818 "ctx_cachep=%p ctx_mempool=%p.\n",
2819 ctx_cachep, ha->ctx_mempool);
2692 } 2820 }
2693 2821
2694 /* Get memory for cached NVRAM */ 2822 /* Get memory for cached NVRAM */
@@ -2703,22 +2831,29 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2703 if (!ha->s_dma_pool) 2831 if (!ha->s_dma_pool)
2704 goto fail_free_nvram; 2832 goto fail_free_nvram;
2705 2833
2834 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
2835 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
2836 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
2837
2706 if (IS_QLA82XX(ha) || ql2xenabledif) { 2838 if (IS_QLA82XX(ha) || ql2xenabledif) {
2707 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2839 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2708 DSD_LIST_DMA_POOL_SIZE, 8, 0); 2840 DSD_LIST_DMA_POOL_SIZE, 8, 0);
2709 if (!ha->dl_dma_pool) { 2841 if (!ha->dl_dma_pool) {
2710 qla_printk(KERN_WARNING, ha, 2842 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
2711 "Memory Allocation failed - dl_dma_pool\n"); 2843 "Failed to allocate memory for dl_dma_pool.\n");
2712 goto fail_s_dma_pool; 2844 goto fail_s_dma_pool;
2713 } 2845 }
2714 2846
2715 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2847 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2716 FCP_CMND_DMA_POOL_SIZE, 8, 0); 2848 FCP_CMND_DMA_POOL_SIZE, 8, 0);
2717 if (!ha->fcp_cmnd_dma_pool) { 2849 if (!ha->fcp_cmnd_dma_pool) {
2718 qla_printk(KERN_WARNING, ha, 2850 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
2719 "Memory Allocation failed - fcp_cmnd_dma_pool\n"); 2851 "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
2720 goto fail_dl_dma_pool; 2852 goto fail_dl_dma_pool;
2721 } 2853 }
2854 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
2855 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
2856 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
2722 } 2857 }
2723 2858
2724 /* Allocate memory for SNS commands */ 2859 /* Allocate memory for SNS commands */
@@ -2728,6 +2863,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2728 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2863 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2729 if (!ha->sns_cmd) 2864 if (!ha->sns_cmd)
2730 goto fail_dma_pool; 2865 goto fail_dma_pool;
2866 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
2867 "sns_cmd.\n", ha->sns_cmd);
2731 } else { 2868 } else {
2732 /* Get consistent memory allocated for MS IOCB */ 2869 /* Get consistent memory allocated for MS IOCB */
2733 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2870 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -2739,12 +2876,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2739 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2876 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2740 if (!ha->ct_sns) 2877 if (!ha->ct_sns)
2741 goto fail_free_ms_iocb; 2878 goto fail_free_ms_iocb;
2879 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
2880 "ms_iocb=%p ct_sns=%p.\n",
2881 ha->ms_iocb, ha->ct_sns);
2742 } 2882 }
2743 2883
2744 /* Allocate memory for request ring */ 2884 /* Allocate memory for request ring */
2745 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 2885 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2746 if (!*req) { 2886 if (!*req) {
2747 DEBUG(printk("Unable to allocate memory for req\n")); 2887 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
2888 "Failed to allocate memory for req.\n");
2748 goto fail_req; 2889 goto fail_req;
2749 } 2890 }
2750 (*req)->length = req_len; 2891 (*req)->length = req_len;
@@ -2752,14 +2893,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2752 ((*req)->length + 1) * sizeof(request_t), 2893 ((*req)->length + 1) * sizeof(request_t),
2753 &(*req)->dma, GFP_KERNEL); 2894 &(*req)->dma, GFP_KERNEL);
2754 if (!(*req)->ring) { 2895 if (!(*req)->ring) {
2755 DEBUG(printk("Unable to allocate memory for req_ring\n")); 2896 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
2897 "Failed to allocate memory for req_ring.\n");
2756 goto fail_req_ring; 2898 goto fail_req_ring;
2757 } 2899 }
2758 /* Allocate memory for response ring */ 2900 /* Allocate memory for response ring */
2759 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 2901 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2760 if (!*rsp) { 2902 if (!*rsp) {
2761 qla_printk(KERN_WARNING, ha, 2903 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
2762 "Unable to allocate memory for rsp\n"); 2904 "Failed to allocate memory for rsp.\n");
2763 goto fail_rsp; 2905 goto fail_rsp;
2764 } 2906 }
2765 (*rsp)->hw = ha; 2907 (*rsp)->hw = ha;
@@ -2768,19 +2910,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2768 ((*rsp)->length + 1) * sizeof(response_t), 2910 ((*rsp)->length + 1) * sizeof(response_t),
2769 &(*rsp)->dma, GFP_KERNEL); 2911 &(*rsp)->dma, GFP_KERNEL);
2770 if (!(*rsp)->ring) { 2912 if (!(*rsp)->ring) {
2771 qla_printk(KERN_WARNING, ha, 2913 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
2772 "Unable to allocate memory for rsp_ring\n"); 2914 "Failed to allocate memory for rsp_ring.\n");
2773 goto fail_rsp_ring; 2915 goto fail_rsp_ring;
2774 } 2916 }
2775 (*req)->rsp = *rsp; 2917 (*req)->rsp = *rsp;
2776 (*rsp)->req = *req; 2918 (*rsp)->req = *req;
2919 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
2920 "req=%p req->length=%d req->ring=%p rsp=%p "
2921 "rsp->length=%d rsp->ring=%p.\n",
2922 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
2923 (*rsp)->ring);
2777 /* Allocate memory for NVRAM data for vports */ 2924 /* Allocate memory for NVRAM data for vports */
2778 if (ha->nvram_npiv_size) { 2925 if (ha->nvram_npiv_size) {
2779 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) * 2926 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2780 ha->nvram_npiv_size, GFP_KERNEL); 2927 ha->nvram_npiv_size, GFP_KERNEL);
2781 if (!ha->npiv_info) { 2928 if (!ha->npiv_info) {
2782 qla_printk(KERN_WARNING, ha, 2929 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
2783 "Unable to allocate memory for npiv info\n"); 2930 "Failed to allocate memory for npiv_info.\n");
2784 goto fail_npiv_info; 2931 goto fail_npiv_info;
2785 } 2932 }
2786 } else 2933 } else
@@ -2792,6 +2939,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2792 &ha->ex_init_cb_dma); 2939 &ha->ex_init_cb_dma);
2793 if (!ha->ex_init_cb) 2940 if (!ha->ex_init_cb)
2794 goto fail_ex_init_cb; 2941 goto fail_ex_init_cb;
2942 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
2943 "ex_init_cb=%p.\n", ha->ex_init_cb);
2795 } 2944 }
2796 2945
2797 INIT_LIST_HEAD(&ha->gbl_dsd_list); 2946 INIT_LIST_HEAD(&ha->gbl_dsd_list);
@@ -2802,6 +2951,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2802 &ha->async_pd_dma); 2951 &ha->async_pd_dma);
2803 if (!ha->async_pd) 2952 if (!ha->async_pd)
2804 goto fail_async_pd; 2953 goto fail_async_pd;
2954 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
2955 "async_pd=%p.\n", ha->async_pd);
2805 } 2956 }
2806 2957
2807 INIT_LIST_HEAD(&ha->vp_list); 2958 INIT_LIST_HEAD(&ha->vp_list);
@@ -2867,7 +3018,8 @@ fail_free_init_cb:
2867 ha->init_cb = NULL; 3018 ha->init_cb = NULL;
2868 ha->init_cb_dma = 0; 3019 ha->init_cb_dma = 0;
2869fail: 3020fail:
2870 DEBUG(printk("%s: Memory allocation failure\n", __func__)); 3021 ql_log(ql_log_fatal, NULL, 0x0030,
3022 "Memory allocation failure.\n");
2871 return -ENOMEM; 3023 return -ENOMEM;
2872} 3024}
2873 3025
@@ -3016,8 +3168,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3016 3168
3017 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 3169 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
3018 if (host == NULL) { 3170 if (host == NULL) {
3019 printk(KERN_WARNING 3171 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
3020 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 3172 "Failed to allocate host from the scsi layer, aborting.\n");
3021 goto fail; 3173 goto fail;
3022 } 3174 }
3023 3175
@@ -3036,6 +3188,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3036 spin_lock_init(&vha->work_lock); 3188 spin_lock_init(&vha->work_lock);
3037 3189
3038 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3190 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3191 ql_dbg(ql_dbg_init, vha, 0x0041,
3192 "Allocated the host=%p hw=%p vha=%p dev_name=%s",
3193 vha->host, vha->hw, vha,
3194 dev_name(&(ha->pdev->dev)));
3195
3039 return vha; 3196 return vha;
3040 3197
3041fail: 3198fail:
@@ -3277,18 +3434,18 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3277 if (status == QLA_SUCCESS) { 3434 if (status == QLA_SUCCESS) {
3278 fcport->old_loop_id = fcport->loop_id; 3435 fcport->old_loop_id = fcport->loop_id;
3279 3436
3280 DEBUG(printk("scsi(%ld): port login OK: logged " 3437 ql_dbg(ql_dbg_disc, vha, 0x2003,
3281 "in ID 0x%x\n", vha->host_no, fcport->loop_id)); 3438 "Port login OK: logged in ID 0x%x.\n",
3439 fcport->loop_id);
3282 3440
3283 qla2x00_update_fcport(vha, fcport); 3441 qla2x00_update_fcport(vha, fcport);
3284 3442
3285 } else if (status == 1) { 3443 } else if (status == 1) {
3286 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3444 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3287 /* retry the login again */ 3445 /* retry the login again */
3288 DEBUG(printk("scsi(%ld): Retrying" 3446 ql_dbg(ql_dbg_disc, vha, 0x2007,
3289 " %d login again loop_id 0x%x\n", 3447 "Retrying %d login again loop_id 0x%x.\n",
3290 vha->host_no, fcport->login_retry, 3448 fcport->login_retry, fcport->loop_id);
3291 fcport->loop_id));
3292 } else { 3449 } else {
3293 fcport->login_retry = 0; 3450 fcport->login_retry = 0;
3294 } 3451 }
@@ -3328,26 +3485,27 @@ qla2x00_do_dpc(void *data)
3328 3485
3329 set_current_state(TASK_INTERRUPTIBLE); 3486 set_current_state(TASK_INTERRUPTIBLE);
3330 while (!kthread_should_stop()) { 3487 while (!kthread_should_stop()) {
3331 DEBUG3(printk("qla2x00: DPC handler sleeping\n")); 3488 ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
3489 "DPC handler sleeping.\n");
3332 3490
3333 schedule(); 3491 schedule();
3334 __set_current_state(TASK_RUNNING); 3492 __set_current_state(TASK_RUNNING);
3335 3493
3336 DEBUG3(printk("qla2x00: DPC handler waking up\n")); 3494 ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
3495 "DPC handler waking up.\n");
3496 ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
3497 "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
3337 3498
3338 /* Initialization not yet finished. Don't do anything yet. */ 3499 /* Initialization not yet finished. Don't do anything yet. */
3339 if (!base_vha->flags.init_done) 3500 if (!base_vha->flags.init_done)
3340 continue; 3501 continue;
3341 3502
3342 if (ha->flags.eeh_busy) { 3503 if (ha->flags.eeh_busy) {
3343 DEBUG17(qla_printk(KERN_WARNING, ha, 3504 ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
3344 "qla2x00_do_dpc: dpc_flags: %lx\n", 3505 "eeh_busy=%d.\n", ha->flags.eeh_busy);
3345 base_vha->dpc_flags));
3346 continue; 3506 continue;
3347 } 3507 }
3348 3508
3349 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
3350
3351 ha->dpc_active = 1; 3509 ha->dpc_active = 1;
3352 3510
3353 if (ha->flags.mbox_busy) { 3511 if (ha->flags.mbox_busy) {
@@ -3364,8 +3522,8 @@ qla2x00_do_dpc(void *data)
3364 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3522 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3365 QLA82XX_DEV_FAILED); 3523 QLA82XX_DEV_FAILED);
3366 qla82xx_idc_unlock(ha); 3524 qla82xx_idc_unlock(ha);
3367 qla_printk(KERN_INFO, ha, 3525 ql_log(ql_log_info, base_vha, 0x4004,
3368 "HW State: FAILED\n"); 3526 "HW State: FAILED.\n");
3369 qla82xx_device_state_handler(base_vha); 3527 qla82xx_device_state_handler(base_vha);
3370 continue; 3528 continue;
3371 } 3529 }
@@ -3373,10 +3531,8 @@ qla2x00_do_dpc(void *data)
3373 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 3531 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3374 &base_vha->dpc_flags)) { 3532 &base_vha->dpc_flags)) {
3375 3533
3376 DEBUG(printk(KERN_INFO 3534 ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
3377 "scsi(%ld): dpc: sched " 3535 "FCoE context reset scheduled.\n");
3378 "qla82xx_fcoe_ctx_reset ha = %p\n",
3379 base_vha->host_no, ha));
3380 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3536 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3381 &base_vha->dpc_flags))) { 3537 &base_vha->dpc_flags))) {
3382 if (qla82xx_fcoe_ctx_reset(base_vha)) { 3538 if (qla82xx_fcoe_ctx_reset(base_vha)) {
@@ -3390,18 +3546,16 @@ qla2x00_do_dpc(void *data)
3390 &base_vha->dpc_flags); 3546 &base_vha->dpc_flags);
3391 } 3547 }
3392 3548
3393 DEBUG(printk("scsi(%ld): dpc:" 3549 ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
3394 " qla82xx_fcoe_ctx_reset end\n", 3550 "FCoE context reset end.\n");
3395 base_vha->host_no));
3396 } 3551 }
3397 } 3552 }
3398 3553
3399 if (test_and_clear_bit(ISP_ABORT_NEEDED, 3554 if (test_and_clear_bit(ISP_ABORT_NEEDED,
3400 &base_vha->dpc_flags)) { 3555 &base_vha->dpc_flags)) {
3401 3556
3402 DEBUG(printk("scsi(%ld): dpc: sched " 3557 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
3403 "qla2x00_abort_isp ha = %p\n", 3558 "ISP abort scheduled.\n");
3404 base_vha->host_no, ha));
3405 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3559 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3406 &base_vha->dpc_flags))) { 3560 &base_vha->dpc_flags))) {
3407 3561
@@ -3414,8 +3568,8 @@ qla2x00_do_dpc(void *data)
3414 &base_vha->dpc_flags); 3568 &base_vha->dpc_flags);
3415 } 3569 }
3416 3570
3417 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 3571 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
3418 base_vha->host_no)); 3572 "ISP abort end.\n");
3419 } 3573 }
3420 3574
3421 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) { 3575 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
@@ -3424,9 +3578,8 @@ qla2x00_do_dpc(void *data)
3424 } 3578 }
3425 3579
3426 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 3580 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3427 DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched " 3581 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3428 "qla2x00_quiesce_needed ha = %p\n", 3582 "Quiescence mode scheduled.\n");
3429 base_vha->host_no, ha));
3430 qla82xx_device_state_handler(base_vha); 3583 qla82xx_device_state_handler(base_vha);
3431 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); 3584 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
3432 if (!ha->flags.quiesce_owner) { 3585 if (!ha->flags.quiesce_owner) {
@@ -3436,17 +3589,20 @@ qla2x00_do_dpc(void *data)
3436 qla82xx_clear_qsnt_ready(base_vha); 3589 qla82xx_clear_qsnt_ready(base_vha);
3437 qla82xx_idc_unlock(ha); 3590 qla82xx_idc_unlock(ha);
3438 } 3591 }
3592 ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
3593 "Quiescence mode end.\n");
3439 } 3594 }
3440 3595
3441 if (test_and_clear_bit(RESET_MARKER_NEEDED, 3596 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3442 &base_vha->dpc_flags) && 3597 &base_vha->dpc_flags) &&
3443 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 3598 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
3444 3599
3445 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", 3600 ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
3446 base_vha->host_no)); 3601 "Reset marker scheduled.\n");
3447
3448 qla2x00_rst_aen(base_vha); 3602 qla2x00_rst_aen(base_vha);
3449 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 3603 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
3604 ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
3605 "Reset marker end.\n");
3450 } 3606 }
3451 3607
3452 /* Retry each device up to login retry count */ 3608 /* Retry each device up to login retry count */
@@ -3455,19 +3611,18 @@ qla2x00_do_dpc(void *data)
3455 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 3611 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3456 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 3612 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
3457 3613
3458 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 3614 ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
3459 base_vha->host_no)); 3615 "Relogin scheduled.\n");
3460 qla2x00_relogin(base_vha); 3616 qla2x00_relogin(base_vha);
3461 3617 ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
3462 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 3618 "Relogin end.\n");
3463 base_vha->host_no));
3464 } 3619 }
3465 3620
3466 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 3621 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3467 &base_vha->dpc_flags)) { 3622 &base_vha->dpc_flags)) {
3468 3623
3469 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 3624 ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
3470 base_vha->host_no)); 3625 "Loop resync scheduled.\n");
3471 3626
3472 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 3627 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
3473 &base_vha->dpc_flags))) { 3628 &base_vha->dpc_flags))) {
@@ -3478,8 +3633,8 @@ qla2x00_do_dpc(void *data)
3478 &base_vha->dpc_flags); 3633 &base_vha->dpc_flags);
3479 } 3634 }
3480 3635
3481 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 3636 ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
3482 base_vha->host_no)); 3637 "Loop resync end.\n");
3483 } 3638 }
3484 3639
3485 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 3640 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
@@ -3502,7 +3657,8 @@ qla2x00_do_dpc(void *data)
3502 } /* End of while(1) */ 3657 } /* End of while(1) */
3503 __set_current_state(TASK_RUNNING); 3658 __set_current_state(TASK_RUNNING);
3504 3659
3505 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); 3660 ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
3661 "DPC handler exiting.\n");
3506 3662
3507 /* 3663 /*
3508 * Make sure that nobody tries to wake us up again. 3664 * Make sure that nobody tries to wake us up again.
@@ -3609,9 +3765,11 @@ void
3609qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp) 3765qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3610{ 3766{
3611 if (atomic_read(&sp->ref_count) == 0) { 3767 if (atomic_read(&sp->ref_count) == 0) {
3612 DEBUG2(qla_printk(KERN_WARNING, ha, 3768 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
3613 "SP reference-count to ZERO -- sp=%p\n", sp)); 3769 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
3614 DEBUG2(BUG()); 3770 sp, sp->cmd);
3771 if (ql2xextended_error_logging & ql_dbg_io)
3772 BUG();
3615 return; 3773 return;
3616 } 3774 }
3617 if (!atomic_dec_and_test(&sp->ref_count)) 3775 if (!atomic_dec_and_test(&sp->ref_count))
@@ -3639,6 +3797,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3639 struct req_que *req; 3797 struct req_que *req;
3640 3798
3641 if (ha->flags.eeh_busy) { 3799 if (ha->flags.eeh_busy) {
3800 ql_dbg(ql_dbg_timer, vha, 0x6000,
3801 "EEH = %d, restarting timer.\n",
3802 ha->flags.eeh_busy);
3642 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3803 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3643 return; 3804 return;
3644 } 3805 }
@@ -3663,9 +3824,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3663 if (atomic_read(&vha->loop_down_timer) == 3824 if (atomic_read(&vha->loop_down_timer) ==
3664 vha->loop_down_abort_time) { 3825 vha->loop_down_abort_time) {
3665 3826
3666 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 3827 ql_log(ql_log_info, vha, 0x6008,
3667 "queues before time expire\n", 3828 "Loop down - aborting the queues before time expires.\n");
3668 vha->host_no));
3669 3829
3670 if (!IS_QLA2100(ha) && vha->link_down_timeout) 3830 if (!IS_QLA2100(ha) && vha->link_down_timeout)
3671 atomic_set(&vha->loop_state, LOOP_DEAD); 3831 atomic_set(&vha->loop_state, LOOP_DEAD);
@@ -3710,10 +3870,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3710 /* if the loop has been down for 4 minutes, reinit adapter */ 3870 /* if the loop has been down for 4 minutes, reinit adapter */
3711 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 3871 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
3712 if (!(vha->device_flags & DFLG_NO_CABLE)) { 3872 if (!(vha->device_flags & DFLG_NO_CABLE)) {
3713 DEBUG(printk("scsi(%ld): Loop down - " 3873 ql_log(ql_log_warn, vha, 0x6009,
3714 "aborting ISP.\n",
3715 vha->host_no));
3716 qla_printk(KERN_WARNING, ha,
3717 "Loop down - aborting ISP.\n"); 3874 "Loop down - aborting ISP.\n");
3718 3875
3719 if (IS_QLA82XX(ha)) 3876 if (IS_QLA82XX(ha))
@@ -3724,9 +3881,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3724 &vha->dpc_flags); 3881 &vha->dpc_flags);
3725 } 3882 }
3726 } 3883 }
3727 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 3884 ql_dbg(ql_dbg_timer, vha, 0x600a,
3728 vha->host_no, 3885 "Loop down - seconds remaining %d.\n",
3729 atomic_read(&vha->loop_down_timer))); 3886 atomic_read(&vha->loop_down_timer));
3730 } 3887 }
3731 3888
3732 /* Check if beacon LED needs to be blinked for physical host only */ 3889 /* Check if beacon LED needs to be blinked for physical host only */
@@ -3749,8 +3906,27 @@ qla2x00_timer(scsi_qla_host_t *vha)
3749 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 3906 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3750 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 3907 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3751 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 3908 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3752 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) 3909 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
3910 ql_dbg(ql_dbg_timer, vha, 0x600b,
3911 "isp_abort_needed=%d loop_resync_needed=%d "
3912 "fcport_update_needed=%d start_dpc=%d "
3913 "reset_marker_needed=%d",
3914 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
3915 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
3916 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
3917 start_dpc,
3918 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
3919 ql_dbg(ql_dbg_timer, vha, 0x600c,
3920 "beacon_blink_needed=%d isp_unrecoverable=%d "
3921 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
3922 "relogin_needed=%d.\n",
3923 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
3924 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
3925 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
3926 test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
3927 test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
3753 qla2xxx_wake_dpc(vha); 3928 qla2xxx_wake_dpc(vha);
3929 }
3754 3930
3755 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3931 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3756} 3932}
@@ -3819,8 +3995,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
3819 goto out; 3995 goto out;
3820 3996
3821 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 3997 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3822 DEBUG2(printk("scsi(%ld): Failed to load firmware image " 3998 ql_log(ql_log_warn, vha, 0x0063,
3823 "(%s).\n", vha->host_no, blob->name)); 3999 "Failed to load firmware image (%s).\n", blob->name);
3824 blob->fw = NULL; 4000 blob->fw = NULL;
3825 blob = NULL; 4001 blob = NULL;
3826 goto out; 4002 goto out;
@@ -3849,8 +4025,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3849 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 4025 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3850 struct qla_hw_data *ha = vha->hw; 4026 struct qla_hw_data *ha = vha->hw;
3851 4027
3852 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n", 4028 ql_dbg(ql_dbg_aer, vha, 0x9000,
3853 state)); 4029 "PCI error detected, state %x.\n", state);
3854 4030
3855 switch (state) { 4031 switch (state) {
3856 case pci_channel_io_normal: 4032 case pci_channel_io_normal:
@@ -3863,9 +4039,9 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3863 ha->flags.isp82xx_fw_hung = 1; 4039 ha->flags.isp82xx_fw_hung = 1;
3864 if (ha->flags.mbox_busy) { 4040 if (ha->flags.mbox_busy) {
3865 ha->flags.mbox_int = 1; 4041 ha->flags.mbox_int = 1;
3866 DEBUG2(qla_printk(KERN_ERR, ha, 4042 ql_dbg(ql_dbg_aer, vha, 0x9001,
3867 "Due to pci channel io frozen, doing premature " 4043 "Due to pci channel io frozen, doing premature "
3868 "completion of mbx command\n")); 4044 "completion of mbx command.\n");
3869 complete(&ha->mbx_intr_comp); 4045 complete(&ha->mbx_intr_comp);
3870 } 4046 }
3871 } 4047 }
@@ -3913,8 +4089,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3913 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4089 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3914 4090
3915 if (risc_paused) { 4091 if (risc_paused) {
3916 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 4092 ql_log(ql_log_info, base_vha, 0x9003,
3917 "Dumping firmware!\n"); 4093 "RISC paused -- mmio_enabled, Dumping firmware.\n");
3918 ha->isp_ops->fw_dump(base_vha, 0); 4094 ha->isp_ops->fw_dump(base_vha, 0);
3919 4095
3920 return PCI_ERS_RESULT_NEED_RESET; 4096 return PCI_ERS_RESULT_NEED_RESET;
@@ -3930,8 +4106,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3930 int fn; 4106 int fn;
3931 struct pci_dev *other_pdev = NULL; 4107 struct pci_dev *other_pdev = NULL;
3932 4108
3933 DEBUG17(qla_printk(KERN_INFO, ha, 4109 ql_dbg(ql_dbg_aer, base_vha, 0x9006,
3934 "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no)); 4110 "Entered %s.\n", __func__);
3935 4111
3936 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 4112 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3937 4113
@@ -3945,8 +4121,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3945 fn = PCI_FUNC(ha->pdev->devfn); 4121 fn = PCI_FUNC(ha->pdev->devfn);
3946 while (fn > 0) { 4122 while (fn > 0) {
3947 fn--; 4123 fn--;
3948 DEBUG17(qla_printk(KERN_INFO, ha, 4124 ql_dbg(ql_dbg_aer, base_vha, 0x9007,
3949 "Finding pci device at function = 0x%x\n", fn)); 4125 "Finding pci device at function = 0x%x.\n", fn);
3950 other_pdev = 4126 other_pdev =
3951 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 4127 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3952 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 4128 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
@@ -3955,9 +4131,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3955 if (!other_pdev) 4131 if (!other_pdev)
3956 continue; 4132 continue;
3957 if (atomic_read(&other_pdev->enable_cnt)) { 4133 if (atomic_read(&other_pdev->enable_cnt)) {
3958 DEBUG17(qla_printk(KERN_INFO, ha, 4134 ql_dbg(ql_dbg_aer, base_vha, 0x9008,
3959 "Found PCI func available and enabled at 0x%x\n", 4135 "Found PCI func available and enable at 0x%x.\n",
3960 fn)); 4136 fn);
3961 pci_dev_put(other_pdev); 4137 pci_dev_put(other_pdev);
3962 break; 4138 break;
3963 } 4139 }
@@ -3966,8 +4142,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3966 4142
3967 if (!fn) { 4143 if (!fn) {
3968 /* Reset owner */ 4144 /* Reset owner */
3969 DEBUG17(qla_printk(KERN_INFO, ha, 4145 ql_dbg(ql_dbg_aer, base_vha, 0x9009,
3970 "This devfn is reset owner = 0x%x\n", ha->pdev->devfn)); 4146 "This devfn is reset owner = 0x%x.\n",
4147 ha->pdev->devfn);
3971 qla82xx_idc_lock(ha); 4148 qla82xx_idc_lock(ha);
3972 4149
3973 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4150 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
@@ -3977,8 +4154,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3977 QLA82XX_IDC_VERSION); 4154 QLA82XX_IDC_VERSION);
3978 4155
3979 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 4156 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3980 DEBUG17(qla_printk(KERN_INFO, ha, 4157 ql_dbg(ql_dbg_aer, base_vha, 0x900a,
3981 "drv_active = 0x%x\n", drv_active)); 4158 "drv_active = 0x%x.\n", drv_active);
3982 4159
3983 qla82xx_idc_unlock(ha); 4160 qla82xx_idc_unlock(ha);
3984 /* Reset if device is not already reset 4161 /* Reset if device is not already reset
@@ -3991,12 +4168,14 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3991 qla82xx_idc_lock(ha); 4168 qla82xx_idc_lock(ha);
3992 4169
3993 if (rval != QLA_SUCCESS) { 4170 if (rval != QLA_SUCCESS) {
3994 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 4171 ql_log(ql_log_info, base_vha, 0x900b,
4172 "HW State: FAILED.\n");
3995 qla82xx_clear_drv_active(ha); 4173 qla82xx_clear_drv_active(ha);
3996 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4174 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3997 QLA82XX_DEV_FAILED); 4175 QLA82XX_DEV_FAILED);
3998 } else { 4176 } else {
3999 qla_printk(KERN_INFO, ha, "HW State: READY\n"); 4177 ql_log(ql_log_info, base_vha, 0x900c,
4178 "HW State: READY.\n");
4000 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4179 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4001 QLA82XX_DEV_READY); 4180 QLA82XX_DEV_READY);
4002 qla82xx_idc_unlock(ha); 4181 qla82xx_idc_unlock(ha);
@@ -4009,8 +4188,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
4009 } 4188 }
4010 qla82xx_idc_unlock(ha); 4189 qla82xx_idc_unlock(ha);
4011 } else { 4190 } else {
4012 DEBUG17(qla_printk(KERN_INFO, ha, 4191 ql_dbg(ql_dbg_aer, base_vha, 0x900d,
4013 "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn)); 4192 "This devfn is not reset owner = 0x%x.\n",
4193 ha->pdev->devfn);
4014 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 4194 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4015 QLA82XX_DEV_READY)) { 4195 QLA82XX_DEV_READY)) {
4016 ha->flags.isp82xx_fw_hung = 0; 4196 ha->flags.isp82xx_fw_hung = 0;
@@ -4034,7 +4214,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4034 struct rsp_que *rsp; 4214 struct rsp_que *rsp;
4035 int rc, retries = 10; 4215 int rc, retries = 10;
4036 4216
4037 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); 4217 ql_dbg(ql_dbg_aer, base_vha, 0x9004,
4218 "Slot Reset.\n");
4038 4219
4039 /* Workaround: qla2xxx driver which access hardware earlier 4220 /* Workaround: qla2xxx driver which access hardware earlier
4040 * needs error state to be pci_channel_io_online. 4221 * needs error state to be pci_channel_io_online.
@@ -4055,7 +4236,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4055 rc = pci_enable_device(pdev); 4236 rc = pci_enable_device(pdev);
4056 4237
4057 if (rc) { 4238 if (rc) {
4058 qla_printk(KERN_WARNING, ha, 4239 ql_log(ql_log_warn, base_vha, 0x9005,
4059 "Can't re-enable PCI device after reset.\n"); 4240 "Can't re-enable PCI device after reset.\n");
4060 goto exit_slot_reset; 4241 goto exit_slot_reset;
4061 } 4242 }
@@ -4085,8 +4266,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4085 4266
4086 4267
4087exit_slot_reset: 4268exit_slot_reset:
4088 DEBUG17(qla_printk(KERN_WARNING, ha, 4269 ql_dbg(ql_dbg_aer, base_vha, 0x900e,
4089 "slot_reset-return:ret=%x\n", ret)); 4270 "slot_reset return %x.\n", ret);
4090 4271
4091 return ret; 4272 return ret;
4092} 4273}
@@ -4098,13 +4279,13 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
4098 struct qla_hw_data *ha = base_vha->hw; 4279 struct qla_hw_data *ha = base_vha->hw;
4099 int ret; 4280 int ret;
4100 4281
4101 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n")); 4282 ql_dbg(ql_dbg_aer, base_vha, 0x900f,
4283 "pci_resume.\n");
4102 4284
4103 ret = qla2x00_wait_for_hba_online(base_vha); 4285 ret = qla2x00_wait_for_hba_online(base_vha);
4104 if (ret != QLA_SUCCESS) { 4286 if (ret != QLA_SUCCESS) {
4105 qla_printk(KERN_ERR, ha, 4287 ql_log(ql_log_fatal, base_vha, 0x9002,
4106 "the device failed to resume I/O " 4288 "The device failed to resume I/O from slot/link_reset.\n");
4107 "from slot/link_reset");
4108 } 4289 }
4109 4290
4110 pci_cleanup_aer_uncorrect_error_status(pdev); 4291 pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -4168,8 +4349,8 @@ qla2x00_module_init(void)
4168 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 4349 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
4169 SLAB_HWCACHE_ALIGN, NULL); 4350 SLAB_HWCACHE_ALIGN, NULL);
4170 if (srb_cachep == NULL) { 4351 if (srb_cachep == NULL) {
4171 printk(KERN_ERR 4352 ql_log(ql_log_fatal, NULL, 0x0001,
4172 "qla2xxx: Unable to allocate SRB cache...Failing load!\n"); 4353 "Unable to allocate SRB cache...Failing load!.\n");
4173 return -ENOMEM; 4354 return -ENOMEM;
4174 } 4355 }
4175 4356
@@ -4182,13 +4363,15 @@ qla2x00_module_init(void)
4182 fc_attach_transport(&qla2xxx_transport_functions); 4363 fc_attach_transport(&qla2xxx_transport_functions);
4183 if (!qla2xxx_transport_template) { 4364 if (!qla2xxx_transport_template) {
4184 kmem_cache_destroy(srb_cachep); 4365 kmem_cache_destroy(srb_cachep);
4366 ql_log(ql_log_fatal, NULL, 0x0002,
4367 "fc_attach_transport failed...Failing load!.\n");
4185 return -ENODEV; 4368 return -ENODEV;
4186 } 4369 }
4187 4370
4188 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 4371 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
4189 if (apidev_major < 0) { 4372 if (apidev_major < 0) {
4190 printk(KERN_WARNING "qla2xxx: Unable to register char device " 4373 ql_log(ql_log_fatal, NULL, 0x0003,
4191 "%s\n", QLA2XXX_APIDEV); 4374 "Unable to register char device %s.\n", QLA2XXX_APIDEV);
4192 } 4375 }
4193 4376
4194 qla2xxx_transport_vport_template = 4377 qla2xxx_transport_vport_template =
@@ -4196,16 +4379,21 @@ qla2x00_module_init(void)
4196 if (!qla2xxx_transport_vport_template) { 4379 if (!qla2xxx_transport_vport_template) {
4197 kmem_cache_destroy(srb_cachep); 4380 kmem_cache_destroy(srb_cachep);
4198 fc_release_transport(qla2xxx_transport_template); 4381 fc_release_transport(qla2xxx_transport_template);
4382 ql_log(ql_log_fatal, NULL, 0x0004,
4383 "fc_attach_transport vport failed...Failing load!.\n");
4199 return -ENODEV; 4384 return -ENODEV;
4200 } 4385 }
4201 4386 ql_log(ql_log_info, NULL, 0x0005,
4202 printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n", 4387 "QLogic Fibre Channel HBA Driver: %s.\n",
4203 qla2x00_version_str); 4388 qla2x00_version_str);
4204 ret = pci_register_driver(&qla2xxx_pci_driver); 4389 ret = pci_register_driver(&qla2xxx_pci_driver);
4205 if (ret) { 4390 if (ret) {
4206 kmem_cache_destroy(srb_cachep); 4391 kmem_cache_destroy(srb_cachep);
4207 fc_release_transport(qla2xxx_transport_template); 4392 fc_release_transport(qla2xxx_transport_template);
4208 fc_release_transport(qla2xxx_transport_vport_template); 4393 fc_release_transport(qla2xxx_transport_vport_template);
4394 ql_log(ql_log_fatal, NULL, 0x0006,
4395 "pci_register_driver failed...ret=%d Failing load!.\n",
4396 ret);
4209 } 4397 }
4210 return ret; 4398 return ret;
4211} 4399}
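Every qla_os.c hunk above applies the same conversion: a compile-time DEBUG*/qla_printk() call becomes an always-built ql_dbg() or ql_log() call (ql_dbg_pci()/ql_log_pci() before a scsi_qla_host exists), taking a verbosity class or severity, the host, a unique hex message id, and a printf-style format. A minimal user-space sketch of that mask-gating idea follows; sketch_dbg, dbg_mask and the mask values are illustrative stand-ins, not the driver's actual symbols or implementation.

    /* Hypothetical stand-alone analogue of the ql_dbg() call pattern above. */
    #include <stdarg.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t dbg_mask = 0x4000;  /* analogue of a module-level debug mask */
    enum { sk_dbg_dpc = 0x4000, sk_dbg_timer = 0x2000 };

    static void sketch_dbg(uint32_t level, uint32_t id, const char *fmt, ...)
    {
            va_list ap;

            if (!(dbg_mask & level))        /* class not enabled: drop the message */
                    return;
            printf("qla2xxx [%04x]: ", id); /* unique id keeps messages greppable  */
            va_start(ap, fmt);
            vprintf(fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            sketch_dbg(sk_dbg_dpc, 0x4000, "DPC handler sleeping.\n");                   /* printed  */
            sketch_dbg(sk_dbg_timer, 0x600a, "Loop down - seconds remaining %d.\n", 42); /* filtered */
            return 0;
    }

In the hunks themselves, ql_log() carries a severity (ql_log_fatal/warn/info) while ql_dbg() carries a trace class (ql_dbg_dpc, ql_dbg_timer, ql_dbg_aer, ...), which is why allocation failures and HW-state changes go through ql_log() and the sleep/wake and scheduling traces go through ql_dbg().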
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 693647661ed1..eff13563c82d 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -189,6 +189,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
189 uint16_t word; 189 uint16_t word;
190 uint32_t nv_cmd, wait_cnt; 190 uint32_t nv_cmd, wait_cnt;
191 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 191 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
192 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
192 193
193 qla2x00_nv_write(ha, NVR_DATA_OUT); 194 qla2x00_nv_write(ha, NVR_DATA_OUT);
194 qla2x00_nv_write(ha, 0); 195 qla2x00_nv_write(ha, 0);
@@ -220,8 +221,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
220 wait_cnt = NVR_WAIT_CNT; 221 wait_cnt = NVR_WAIT_CNT;
221 do { 222 do {
222 if (!--wait_cnt) { 223 if (!--wait_cnt) {
223 DEBUG9_10(qla_printk(KERN_WARNING, ha, 224 ql_dbg(ql_dbg_user, vha, 0x708d,
224 "NVRAM didn't go ready...\n")); 225 "NVRAM didn't go ready...\n");
225 break; 226 break;
226 } 227 }
227 NVRAM_DELAY(); 228 NVRAM_DELAY();
@@ -308,6 +309,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
308 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 309 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
309 uint32_t word, wait_cnt; 310 uint32_t word, wait_cnt;
310 uint16_t wprot, wprot_old; 311 uint16_t wprot, wprot_old;
312 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
311 313
312 /* Clear NVRAM write protection. */ 314 /* Clear NVRAM write protection. */
313 ret = QLA_FUNCTION_FAILED; 315 ret = QLA_FUNCTION_FAILED;
@@ -350,8 +352,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
350 wait_cnt = NVR_WAIT_CNT; 352 wait_cnt = NVR_WAIT_CNT;
351 do { 353 do {
352 if (!--wait_cnt) { 354 if (!--wait_cnt) {
353 DEBUG9_10(qla_printk(KERN_WARNING, ha, 355 ql_dbg(ql_dbg_user, vha, 0x708e,
354 "NVRAM didn't go ready...\n")); 356 "NVRAM didn't go ready...\n");
355 break; 357 break;
356 } 358 }
357 NVRAM_DELAY(); 359 NVRAM_DELAY();
@@ -371,6 +373,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
371{ 373{
372 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 374 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
373 uint32_t word, wait_cnt; 375 uint32_t word, wait_cnt;
376 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
374 377
375 if (stat != QLA_SUCCESS) 378 if (stat != QLA_SUCCESS)
376 return; 379 return;
@@ -409,8 +412,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
409 wait_cnt = NVR_WAIT_CNT; 412 wait_cnt = NVR_WAIT_CNT;
410 do { 413 do {
411 if (!--wait_cnt) { 414 if (!--wait_cnt) {
412 DEBUG9_10(qla_printk(KERN_WARNING, ha, 415 ql_dbg(ql_dbg_user, vha, 0x708f,
413 "NVRAM didn't go ready...\n")); 416 "NVRAM didn't go ready...\n");
414 break; 417 break;
415 } 418 }
416 NVRAM_DELAY(); 419 NVRAM_DELAY();
@@ -607,9 +610,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
607 for (chksum = 0; cnt; cnt--) 610 for (chksum = 0; cnt; cnt--)
608 chksum += le16_to_cpu(*wptr++); 611 chksum += le16_to_cpu(*wptr++);
609 if (chksum) { 612 if (chksum) {
610 qla_printk(KERN_ERR, ha, 613 ql_log(ql_log_fatal, vha, 0x0045,
611 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); 614 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
612 qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location)); 615 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
616 buf, sizeof(struct qla_flt_location));
613 return QLA_FUNCTION_FAILED; 617 return QLA_FUNCTION_FAILED;
614 } 618 }
615 619
@@ -618,7 +622,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
618 *start = (le16_to_cpu(fltl->start_hi) << 16 | 622 *start = (le16_to_cpu(fltl->start_hi) << 16 |
619 le16_to_cpu(fltl->start_lo)) >> 2; 623 le16_to_cpu(fltl->start_lo)) >> 2;
620end: 624end:
621 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start)); 625 ql_dbg(ql_dbg_init, vha, 0x0046,
626 "FLTL[%s] = 0x%x.\n",
627 loc, *start);
622 return QLA_SUCCESS; 628 return QLA_SUCCESS;
623} 629}
624 630
@@ -685,10 +691,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
685 if (*wptr == __constant_cpu_to_le16(0xffff)) 691 if (*wptr == __constant_cpu_to_le16(0xffff))
686 goto no_flash_data; 692 goto no_flash_data;
687 if (flt->version != __constant_cpu_to_le16(1)) { 693 if (flt->version != __constant_cpu_to_le16(1)) {
688 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: " 694 ql_log(ql_log_warn, vha, 0x0047,
689 "version=0x%x length=0x%x checksum=0x%x.\n", 695 "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
690 le16_to_cpu(flt->version), le16_to_cpu(flt->length), 696 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
691 le16_to_cpu(flt->checksum))); 697 le16_to_cpu(flt->checksum));
692 goto no_flash_data; 698 goto no_flash_data;
693 } 699 }
694 700
@@ -696,10 +702,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
696 for (chksum = 0; cnt; cnt--) 702 for (chksum = 0; cnt; cnt--)
697 chksum += le16_to_cpu(*wptr++); 703 chksum += le16_to_cpu(*wptr++);
698 if (chksum) { 704 if (chksum) {
699 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: " 705 ql_log(ql_log_fatal, vha, 0x0048,
700 "version=0x%x length=0x%x checksum=0x%x.\n", 706 "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
701 le16_to_cpu(flt->version), le16_to_cpu(flt->length), 707 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
702 chksum)); 708 le16_to_cpu(flt->checksum));
703 goto no_flash_data; 709 goto no_flash_data;
704 } 710 }
705 711
@@ -708,10 +714,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
708 for ( ; cnt; cnt--, region++) { 714 for ( ; cnt; cnt--, region++) {
709 /* Store addresses as DWORD offsets. */ 715 /* Store addresses as DWORD offsets. */
710 start = le32_to_cpu(region->start) >> 2; 716 start = le32_to_cpu(region->start) >> 2;
711 717 ql_dbg(ql_dbg_init, vha, 0x0049,
712 DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x " 718 "FLT[%02x]: start=0x%x "
713 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, 719 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
714 le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); 720 start, le32_to_cpu(region->end) >> 2,
721 le32_to_cpu(region->size));
715 722
716 switch (le32_to_cpu(region->code) & 0xff) { 723 switch (le32_to_cpu(region->code) & 0xff) {
717 case FLT_REG_FW: 724 case FLT_REG_FW:
@@ -796,12 +803,16 @@ no_flash_data:
796 ha->flt_region_npiv_conf = ha->flags.port0 ? 803 ha->flt_region_npiv_conf = ha->flags.port0 ?
797 def_npiv_conf0[def] : def_npiv_conf1[def]; 804 def_npiv_conf0[def] : def_npiv_conf1[def];
798done: 805done:
799 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 806 ql_dbg(ql_dbg_init, vha, 0x004a,
800 "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x " 807 "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
801 "npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot, 808 loc, ha->flt_region_boot,
802 ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd, 809 ha->flt_region_fw, ha->flt_region_vpd_nvram,
803 ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt, 810 ha->flt_region_vpd);
804 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio)); 811 ql_dbg(ql_dbg_init, vha, 0x004b,
812 "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
813 ha->flt_region_nvram,
814 ha->flt_region_fdt, ha->flt_region_flt,
815 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
805} 816}
806 817
807static void 818static void
@@ -833,10 +844,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
833 cnt++) 844 cnt++)
834 chksum += le16_to_cpu(*wptr++); 845 chksum += le16_to_cpu(*wptr++);
835 if (chksum) { 846 if (chksum) {
836 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: " 847 ql_dbg(ql_dbg_init, vha, 0x004c,
837 "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0], 848 "Inconsistent FDT detected:"
 838 le16_to_cpu(fdt->version))); 849 " checksum=0x%x id=%c version=0x%x.\n", chksum,
839 DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt))); 850 fdt->sig[0], le16_to_cpu(fdt->version));
851 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
852 (uint8_t *)fdt, sizeof(*fdt));
840 goto no_flash_data; 853 goto no_flash_data;
841 } 854 }
842 855
@@ -890,11 +903,12 @@ no_flash_data:
890 break; 903 break;
891 } 904 }
892done: 905done:
893 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x " 906 ql_dbg(ql_dbg_init, vha, 0x004d,
894 "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, 907 "FDT[%x]: (0x%x/0x%x) erase=0x%x "
908 "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
895 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, 909 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
896 ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable, 910 ha->fdt_wrt_disable, ha->fdt_block_size);
897 ha->fdt_block_size)); 911
898} 912}
899 913
900static void 914static void
@@ -919,6 +933,10 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
919 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++); 933 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
920 ha->nx_reset_timeout = le32_to_cpu(*wptr); 934 ha->nx_reset_timeout = le32_to_cpu(*wptr);
921 } 935 }
936 ql_dbg(ql_dbg_init, vha, 0x004e,
937 "nx_dev_init_timeout=%d "
938 "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
939 ha->nx_reset_timeout);
922 return; 940 return;
923} 941}
924 942
@@ -963,17 +981,18 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
963 if (hdr.version == __constant_cpu_to_le16(0xffff)) 981 if (hdr.version == __constant_cpu_to_le16(0xffff))
964 return; 982 return;
965 if (hdr.version != __constant_cpu_to_le16(1)) { 983 if (hdr.version != __constant_cpu_to_le16(1)) {
966 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config " 984 ql_dbg(ql_dbg_user, vha, 0x7090,
985 "Unsupported NPIV-Config "
967 "detected: version=0x%x entries=0x%x checksum=0x%x.\n", 986 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
968 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), 987 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
969 le16_to_cpu(hdr.checksum))); 988 le16_to_cpu(hdr.checksum));
970 return; 989 return;
971 } 990 }
972 991
973 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL); 992 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
974 if (!data) { 993 if (!data) {
975 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to " 994 ql_log(ql_log_warn, vha, 0x7091,
976 "allocate memory.\n")); 995 "Unable to allocate memory for data.\n");
977 return; 996 return;
978 } 997 }
979 998
@@ -985,10 +1004,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
985 for (wptr = data, chksum = 0; cnt; cnt--) 1004 for (wptr = data, chksum = 0; cnt; cnt--)
986 chksum += le16_to_cpu(*wptr++); 1005 chksum += le16_to_cpu(*wptr++);
987 if (chksum) { 1006 if (chksum) {
988 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config " 1007 ql_dbg(ql_dbg_user, vha, 0x7092,
1008 "Inconsistent NPIV-Config "
989 "detected: version=0x%x entries=0x%x checksum=0x%x.\n", 1009 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
990 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), 1010 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
991 chksum)); 1011 le16_to_cpu(hdr.checksum));
992 goto done; 1012 goto done;
993 } 1013 }
994 1014
@@ -1014,21 +1034,22 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
1014 vid.port_name = wwn_to_u64(entry->port_name); 1034 vid.port_name = wwn_to_u64(entry->port_name);
1015 vid.node_name = wwn_to_u64(entry->node_name); 1035 vid.node_name = wwn_to_u64(entry->node_name);
1016 1036
1017 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " 1037 ql_dbg(ql_dbg_user, vha, 0x7093,
1018 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 1038 "NPIV[%02x]: wwpn=%llx "
1019 (unsigned long long)vid.port_name, 1039 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
1020 (unsigned long long)vid.node_name, 1040 (unsigned long long)vid.port_name,
1021 le16_to_cpu(entry->vf_id), 1041 (unsigned long long)vid.node_name,
1022 entry->q_qos, entry->f_qos)); 1042 le16_to_cpu(entry->vf_id),
1043 entry->q_qos, entry->f_qos);
1023 1044
1024 if (i < QLA_PRECONFIG_VPORTS) { 1045 if (i < QLA_PRECONFIG_VPORTS) {
1025 vport = fc_vport_create(vha->host, 0, &vid); 1046 vport = fc_vport_create(vha->host, 0, &vid);
1026 if (!vport) 1047 if (!vport)
1027 qla_printk(KERN_INFO, ha, 1048 ql_log(ql_log_warn, vha, 0x7094,
1028 "NPIV-Config: Failed to create vport [%02x]: " 1049 "NPIV-Config Failed to create vport [%02x]: "
1029 "wwpn=%llx wwnn=%llx.\n", cnt, 1050 "wwpn=%llx wwnn=%llx.\n", cnt,
1030 (unsigned long long)vid.port_name, 1051 (unsigned long long)vid.port_name,
1031 (unsigned long long)vid.node_name); 1052 (unsigned long long)vid.node_name);
1032 } 1053 }
1033 } 1054 }
1034done: 1055done:
@@ -1127,9 +1148,10 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1127 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 1148 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
1128 &optrom_dma, GFP_KERNEL); 1149 &optrom_dma, GFP_KERNEL);
1129 if (!optrom) { 1150 if (!optrom) {
1130 qla_printk(KERN_DEBUG, ha, 1151 ql_log(ql_log_warn, vha, 0x7095,
1131 "Unable to allocate memory for optrom burst write " 1152 "Unable to allocate "
1132 "(%x KB).\n", OPTROM_BURST_SIZE / 1024); 1153 "memory for optrom burst write (%x KB).\n",
1154 OPTROM_BURST_SIZE / 1024);
1133 } 1155 }
1134 } 1156 }
1135 1157
@@ -1138,7 +1160,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1138 1160
1139 ret = qla24xx_unprotect_flash(vha); 1161 ret = qla24xx_unprotect_flash(vha);
1140 if (ret != QLA_SUCCESS) { 1162 if (ret != QLA_SUCCESS) {
1141 qla_printk(KERN_WARNING, ha, 1163 ql_log(ql_log_warn, vha, 0x7096,
1142 "Unable to unprotect flash for update.\n"); 1164 "Unable to unprotect flash for update.\n");
1143 goto done; 1165 goto done;
1144 } 1166 }
@@ -1156,9 +1178,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1156 0xff0000) | ((fdata >> 16) & 0xff)); 1178 0xff0000) | ((fdata >> 16) & 0xff));
1157 ret = qla24xx_erase_sector(vha, fdata); 1179 ret = qla24xx_erase_sector(vha, fdata);
1158 if (ret != QLA_SUCCESS) { 1180 if (ret != QLA_SUCCESS) {
1159 DEBUG9(qla_printk(KERN_WARNING, ha, 1181 ql_dbg(ql_dbg_user, vha, 0x7007,
1160 "Unable to erase sector: address=%x.\n", 1182 "Unable to erase erase sector: address=%x.\n",
1161 faddr)); 1183 faddr);
1162 break; 1184 break;
1163 } 1185 }
1164 } 1186 }
@@ -1172,12 +1194,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1172 flash_data_addr(ha, faddr), 1194 flash_data_addr(ha, faddr),
1173 OPTROM_BURST_DWORDS); 1195 OPTROM_BURST_DWORDS);
1174 if (ret != QLA_SUCCESS) { 1196 if (ret != QLA_SUCCESS) {
1175 qla_printk(KERN_WARNING, ha, 1197 ql_log(ql_log_warn, vha, 0x7097,
1176 "Unable to burst-write optrom segment " 1198 "Unable to burst-write optrom segment "
1177 "(%x/%x/%llx).\n", ret, 1199 "(%x/%x/%llx).\n", ret,
1178 flash_data_addr(ha, faddr), 1200 flash_data_addr(ha, faddr),
1179 (unsigned long long)optrom_dma); 1201 (unsigned long long)optrom_dma);
1180 qla_printk(KERN_WARNING, ha, 1202 ql_log(ql_log_warn, vha, 0x7098,
1181 "Reverting to slow-write.\n"); 1203 "Reverting to slow-write.\n");
1182 1204
1183 dma_free_coherent(&ha->pdev->dev, 1205 dma_free_coherent(&ha->pdev->dev,
@@ -1194,9 +1216,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1194 ret = qla24xx_write_flash_dword(ha, 1216 ret = qla24xx_write_flash_dword(ha,
1195 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr)); 1217 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
1196 if (ret != QLA_SUCCESS) { 1218 if (ret != QLA_SUCCESS) {
1197 DEBUG9(printk("%s(%ld) Unable to program flash " 1219 ql_dbg(ql_dbg_user, vha, 0x7006,
1198 "address=%x data=%x.\n", __func__, 1220 "Unable to program flash address=%x data=%x.\n",
1199 vha->host_no, faddr, *dwptr)); 1221 faddr, *dwptr);
1200 break; 1222 break;
1201 } 1223 }
1202 1224
@@ -1211,7 +1233,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1211 1233
1212 ret = qla24xx_protect_flash(vha); 1234 ret = qla24xx_protect_flash(vha);
1213 if (ret != QLA_SUCCESS) 1235 if (ret != QLA_SUCCESS)
1214 qla_printk(KERN_WARNING, ha, 1236 ql_log(ql_log_warn, vha, 0x7099,
1215 "Unable to protect flash after update.\n"); 1237 "Unable to protect flash after update.\n");
1216done: 1238done:
1217 if (optrom) 1239 if (optrom)
@@ -1324,9 +1346,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1324 ret = qla24xx_write_flash_dword(ha, 1346 ret = qla24xx_write_flash_dword(ha,
1325 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); 1347 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1326 if (ret != QLA_SUCCESS) { 1348 if (ret != QLA_SUCCESS) {
1327 DEBUG9(qla_printk(KERN_WARNING, ha, 1349 ql_dbg(ql_dbg_user, vha, 0x709a,
1328 "Unable to program nvram address=%x data=%x.\n", 1350 "Unable to program nvram address=%x data=%x.\n",
1329 naddr, *dwptr)); 1351 naddr, *dwptr);
1330 break; 1352 break;
1331 } 1353 }
1332 } 1354 }
@@ -1476,7 +1498,7 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
1476 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; 1498 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
1477 1499
1478 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1500 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1479 qla_printk(KERN_WARNING, ha, 1501 ql_log(ql_log_warn, vha, 0x709b,
1480 "Unable to update fw options (beacon on).\n"); 1502 "Unable to update fw options (beacon on).\n");
1481 return QLA_FUNCTION_FAILED; 1503 return QLA_FUNCTION_FAILED;
1482 } 1504 }
@@ -1541,7 +1563,7 @@ qla2x00_beacon_off(struct scsi_qla_host *vha)
1541 1563
1542 rval = qla2x00_set_fw_options(vha, ha->fw_options); 1564 rval = qla2x00_set_fw_options(vha, ha->fw_options);
1543 if (rval != QLA_SUCCESS) 1565 if (rval != QLA_SUCCESS)
1544 qla_printk(KERN_WARNING, ha, 1566 ql_log(ql_log_warn, vha, 0x709c,
1545 "Unable to update fw options (beacon off).\n"); 1567 "Unable to update fw options (beacon off).\n");
1546 return rval; 1568 return rval;
1547} 1569}
@@ -1616,7 +1638,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
1616 1638
1617 if (qla2x00_get_fw_options(vha, ha->fw_options) != 1639 if (qla2x00_get_fw_options(vha, ha->fw_options) !=
1618 QLA_SUCCESS) { 1640 QLA_SUCCESS) {
1619 qla_printk(KERN_WARNING, ha, 1641 ql_log(ql_log_warn, vha, 0x7009,
1620 "Unable to update fw options (beacon on).\n"); 1642 "Unable to update fw options (beacon on).\n");
1621 return QLA_FUNCTION_FAILED; 1643 return QLA_FUNCTION_FAILED;
1622 } 1644 }
@@ -1670,14 +1692,14 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
1670 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 1692 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
1671 1693
1672 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1694 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1673 qla_printk(KERN_WARNING, ha, 1695 ql_log(ql_log_warn, vha, 0x704d,
1674 "Unable to update fw options (beacon off).\n"); 1696 "Unable to update fw options (beacon on).\n");
1675 return QLA_FUNCTION_FAILED; 1697 return QLA_FUNCTION_FAILED;
1676 } 1698 }
1677 1699
1678 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1700 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1679 qla_printk(KERN_WARNING, ha, 1701 ql_log(ql_log_warn, vha, 0x704e,
1680 "Unable to get fw options (beacon off).\n"); 1702 "Unable to update fw options (beacon on).\n");
1681 return QLA_FUNCTION_FAILED; 1703 return QLA_FUNCTION_FAILED;
1682 } 1704 }
1683 1705
@@ -2389,10 +2411,9 @@ try_fast:
2389 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2411 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
2390 &optrom_dma, GFP_KERNEL); 2412 &optrom_dma, GFP_KERNEL);
2391 if (!optrom) { 2413 if (!optrom) {
2392 qla_printk(KERN_DEBUG, ha, 2414 ql_log(ql_log_warn, vha, 0x00cc,
2393 "Unable to allocate memory for optrom burst read " 2415 "Unable to allocate memory for optrom burst read (%x KB).\n",
2394 "(%x KB).\n", OPTROM_BURST_SIZE / 1024); 2416 OPTROM_BURST_SIZE / 1024);
2395
2396 goto slow_read; 2417 goto slow_read;
2397 } 2418 }
2398 2419
@@ -2407,12 +2428,11 @@ try_fast:
2407 rval = qla2x00_dump_ram(vha, optrom_dma, 2428 rval = qla2x00_dump_ram(vha, optrom_dma,
2408 flash_data_addr(ha, faddr), burst); 2429 flash_data_addr(ha, faddr), burst);
2409 if (rval) { 2430 if (rval) {
2410 qla_printk(KERN_WARNING, ha, 2431 ql_log(ql_log_warn, vha, 0x00f5,
2411 "Unable to burst-read optrom segment " 2432 "Unable to burst-read optrom segment (%x/%x/%llx).\n",
2412 "(%x/%x/%llx).\n", rval, 2433 rval, flash_data_addr(ha, faddr),
2413 flash_data_addr(ha, faddr),
2414 (unsigned long long)optrom_dma); 2434 (unsigned long long)optrom_dma);
2415 qla_printk(KERN_WARNING, ha, 2435 ql_log(ql_log_warn, vha, 0x00f6,
2416 "Reverting to slow-read.\n"); 2436 "Reverting to slow-read.\n");
2417 2437
2418 dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2438 dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
@@ -2556,8 +2576,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2556 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || 2576 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
2557 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { 2577 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
2558 /* No signature */ 2578 /* No signature */
2559 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " 2579 ql_log(ql_log_fatal, vha, 0x0050,
2560 "signature.\n")); 2580 "No matching ROM signature.\n");
2561 ret = QLA_FUNCTION_FAILED; 2581 ret = QLA_FUNCTION_FAILED;
2562 break; 2582 break;
2563 } 2583 }
@@ -2573,8 +2593,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2573 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || 2593 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
2574 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { 2594 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
2575 /* Incorrect header. */ 2595 /* Incorrect header. */
2576 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " 2596 ql_log(ql_log_fatal, vha, 0x0051,
2577 "found pcir_adr=%x.\n", pcids)); 2597 "PCI data struct not found pcir_adr=%x.\n", pcids);
2578 ret = QLA_FUNCTION_FAILED; 2598 ret = QLA_FUNCTION_FAILED;
2579 break; 2599 break;
2580 } 2600 }
@@ -2588,8 +2608,9 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2588 qla2x00_read_flash_byte(ha, pcids + 0x12); 2608 qla2x00_read_flash_byte(ha, pcids + 0x12);
2589 ha->bios_revision[1] = 2609 ha->bios_revision[1] =
2590 qla2x00_read_flash_byte(ha, pcids + 0x13); 2610 qla2x00_read_flash_byte(ha, pcids + 0x13);
2591 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", 2611 ql_dbg(ql_dbg_init, vha, 0x0052,
2592 ha->bios_revision[1], ha->bios_revision[0])); 2612 "Read BIOS %d.%d.\n",
2613 ha->bios_revision[1], ha->bios_revision[0]);
2593 break; 2614 break;
2594 case ROM_CODE_TYPE_FCODE: 2615 case ROM_CODE_TYPE_FCODE:
2595 /* Open Firmware standard for PCI (FCode). */ 2616 /* Open Firmware standard for PCI (FCode). */
@@ -2602,12 +2623,14 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2602 qla2x00_read_flash_byte(ha, pcids + 0x12); 2623 qla2x00_read_flash_byte(ha, pcids + 0x12);
2603 ha->efi_revision[1] = 2624 ha->efi_revision[1] =
2604 qla2x00_read_flash_byte(ha, pcids + 0x13); 2625 qla2x00_read_flash_byte(ha, pcids + 0x13);
2605 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", 2626 ql_dbg(ql_dbg_init, vha, 0x0053,
2606 ha->efi_revision[1], ha->efi_revision[0])); 2627 "Read EFI %d.%d.\n",
2628 ha->efi_revision[1], ha->efi_revision[0]);
2607 break; 2629 break;
2608 default: 2630 default:
2609 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " 2631 ql_log(ql_log_warn, vha, 0x0054,
2610 "type %x at pcids %x.\n", code_type, pcids)); 2632 "Unrecognized code type %x at pcids %x.\n",
2633 code_type, pcids);
2611 break; 2634 break;
2612 } 2635 }
2613 2636
@@ -2627,21 +2650,28 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2627 2650
2628 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, 2651 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2629 8); 2652 8);
2630 DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from " 2653 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
2631 "flash:\n")); 2654 "Dumping fw "
 2632 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8)); 2655 "ver from flash:\n");
2656 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
2657 (uint8_t *)dbyte, 8);
2633 2658
2634 if ((dcode[0] == 0xffff && dcode[1] == 0xffff && 2659 if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
2635 dcode[2] == 0xffff && dcode[3] == 0xffff) || 2660 dcode[2] == 0xffff && dcode[3] == 0xffff) ||
2636 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2661 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2637 dcode[3] == 0)) { 2662 dcode[3] == 0)) {
2638 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " 2663 ql_log(ql_log_warn, vha, 0x0057,
2639 "revision at %x.\n", ha->flt_region_fw * 4)); 2664 "Unrecognized fw revision at %x.\n",
2665 ha->flt_region_fw * 4);
2640 } else { 2666 } else {
2641 /* values are in big endian */ 2667 /* values are in big endian */
2642 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2668 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
2643 ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3]; 2669 ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
2644 ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5]; 2670 ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
2671 ql_dbg(ql_dbg_init, vha, 0x0058,
2672 "FW Version: "
2673 "%d.%d.%d.\n", ha->fw_revision[0],
2674 ha->fw_revision[1], ha->fw_revision[2]);
2645 } 2675 }
2646 } 2676 }
2647 2677
@@ -2683,8 +2713,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2683 bcode = mbuf + (pcihdr % 4); 2713 bcode = mbuf + (pcihdr % 4);
2684 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { 2714 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
2685 /* No signature */ 2715 /* No signature */
2686 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " 2716 ql_log(ql_log_fatal, vha, 0x0059,
2687 "signature.\n")); 2717 "No matching ROM signature.\n");
2688 ret = QLA_FUNCTION_FAILED; 2718 ret = QLA_FUNCTION_FAILED;
2689 break; 2719 break;
2690 } 2720 }
@@ -2699,8 +2729,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2699 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || 2729 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
2700 bcode[0x2] != 'I' || bcode[0x3] != 'R') { 2730 bcode[0x2] != 'I' || bcode[0x3] != 'R') {
2701 /* Incorrect header. */ 2731 /* Incorrect header. */
2702 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " 2732 ql_log(ql_log_fatal, vha, 0x005a,
2703 "found pcir_adr=%x.\n", pcids)); 2733 "PCI data struct not found pcir_adr=%x.\n", pcids);
2704 ret = QLA_FUNCTION_FAILED; 2734 ret = QLA_FUNCTION_FAILED;
2705 break; 2735 break;
2706 } 2736 }
@@ -2712,26 +2742,30 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2712 /* Intel x86, PC-AT compatible. */ 2742 /* Intel x86, PC-AT compatible. */
2713 ha->bios_revision[0] = bcode[0x12]; 2743 ha->bios_revision[0] = bcode[0x12];
2714 ha->bios_revision[1] = bcode[0x13]; 2744 ha->bios_revision[1] = bcode[0x13];
2715 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", 2745 ql_dbg(ql_dbg_init, vha, 0x005b,
2716 ha->bios_revision[1], ha->bios_revision[0])); 2746 "Read BIOS %d.%d.\n",
2747 ha->bios_revision[1], ha->bios_revision[0]);
2717 break; 2748 break;
2718 case ROM_CODE_TYPE_FCODE: 2749 case ROM_CODE_TYPE_FCODE:
2719 /* Open Firmware standard for PCI (FCode). */ 2750 /* Open Firmware standard for PCI (FCode). */
2720 ha->fcode_revision[0] = bcode[0x12]; 2751 ha->fcode_revision[0] = bcode[0x12];
2721 ha->fcode_revision[1] = bcode[0x13]; 2752 ha->fcode_revision[1] = bcode[0x13];
2722 DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n", 2753 ql_dbg(ql_dbg_init, vha, 0x005c,
2723 ha->fcode_revision[1], ha->fcode_revision[0])); 2754 "Read FCODE %d.%d.\n",
2755 ha->fcode_revision[1], ha->fcode_revision[0]);
2724 break; 2756 break;
2725 case ROM_CODE_TYPE_EFI: 2757 case ROM_CODE_TYPE_EFI:
2726 /* Extensible Firmware Interface (EFI). */ 2758 /* Extensible Firmware Interface (EFI). */
2727 ha->efi_revision[0] = bcode[0x12]; 2759 ha->efi_revision[0] = bcode[0x12];
2728 ha->efi_revision[1] = bcode[0x13]; 2760 ha->efi_revision[1] = bcode[0x13];
2729 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", 2761 ql_dbg(ql_dbg_init, vha, 0x005d,
2730 ha->efi_revision[1], ha->efi_revision[0])); 2762 "Read EFI %d.%d.\n",
2763 ha->efi_revision[1], ha->efi_revision[0]);
2731 break; 2764 break;
2732 default: 2765 default:
2733 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " 2766 ql_log(ql_log_warn, vha, 0x005e,
2734 "type %x at pcids %x.\n", code_type, pcids)); 2767 "Unrecognized code type %x at pcids %x.\n",
2768 code_type, pcids);
2735 break; 2769 break;
2736 } 2770 }
2737 2771
@@ -2753,13 +2787,18 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2753 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 2787 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
2754 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2788 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2755 dcode[3] == 0)) { 2789 dcode[3] == 0)) {
2756 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " 2790 ql_log(ql_log_warn, vha, 0x005f,
2757 "revision at %x.\n", ha->flt_region_fw * 4)); 2791 "Unrecognized fw revision at %x.\n",
2792 ha->flt_region_fw * 4);
2758 } else { 2793 } else {
2759 ha->fw_revision[0] = dcode[0]; 2794 ha->fw_revision[0] = dcode[0];
2760 ha->fw_revision[1] = dcode[1]; 2795 ha->fw_revision[1] = dcode[1];
2761 ha->fw_revision[2] = dcode[2]; 2796 ha->fw_revision[2] = dcode[2];
2762 ha->fw_revision[3] = dcode[3]; 2797 ha->fw_revision[3] = dcode[3];
2798 ql_dbg(ql_dbg_init, vha, 0x0060,
2799 "Firmware revision %d.%d.%d.%d.\n",
2800 ha->fw_revision[0], ha->fw_revision[1],
2801 ha->fw_revision[2], ha->fw_revision[3]);
2763 } 2802 }
2764 2803
2765 /* Check for golden firmware and get version if available */ 2804 /* Check for golden firmware and get version if available */
@@ -2775,9 +2814,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2775 2814
2776 if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF && 2815 if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
2777 dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) { 2816 dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
2778 DEBUG2(qla_printk(KERN_INFO, ha, 2817 ql_log(ql_log_warn, vha, 0x0056,
2779 "%s(%ld): Unrecognized golden fw at 0x%x.\n", 2818 "Unrecognized golden fw at 0x%x.\n",
2780 __func__, vha->host_no, ha->flt_region_gold_fw * 4)); 2819 ha->flt_region_gold_fw * 4);
2781 return ret; 2820 return ret;
2782 } 2821 }
2783 2822
@@ -2843,9 +2882,9 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2843 if (!ha->fcp_prio_cfg) { 2882 if (!ha->fcp_prio_cfg) {
2844 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 2883 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
2845 if (!ha->fcp_prio_cfg) { 2884 if (!ha->fcp_prio_cfg) {
2846 qla_printk(KERN_WARNING, ha, 2885 ql_log(ql_log_warn, vha, 0x00d5,
2847 "Unable to allocate memory for fcp priority data " 2886 "Unable to allocate memory for fcp priorty data (%x).\n",
2848 "(%x).\n", FCP_PRIO_CFG_SIZE); 2887 FCP_PRIO_CFG_SIZE);
2849 return QLA_FUNCTION_FAILED; 2888 return QLA_FUNCTION_FAILED;
2850 } 2889 }
2851 } 2890 }
@@ -2857,7 +2896,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2857 ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg, 2896 ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
2858 fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE); 2897 fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
2859 2898
2860 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0)) 2899 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
2861 goto fail; 2900 goto fail;
2862 2901
2863 /* read remaining FCP CMD config data from flash */ 2902 /* read remaining FCP CMD config data from flash */
@@ -2869,7 +2908,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2869 fcp_prio_addr << 2, (len < max_len ? len : max_len)); 2908 fcp_prio_addr << 2, (len < max_len ? len : max_len));
2870 2909
2871 /* revalidate the entire FCP priority config data, including entries */ 2910 /* revalidate the entire FCP priority config data, including entries */
2872 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1)) 2911 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
2873 goto fail; 2912 goto fail;
2874 2913
2875 ha->flags.fcp_prio_enabled = 1; 2914 ha->flags.fcp_prio_enabled = 1;