author		Saurav Kashyap <saurav.kashyap@qlogic.com>	2011-07-14 15:00:13 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2011-07-27 06:16:17 -0400
commit		7c3df1320e5e875478775e78d01a09aee96b8abe
tree		215326b999b3db03f4a2268a79c3848803daaf7d /drivers/scsi/qla2xxx/qla_attr.c
parent		3ce8866ceae87258cf66d1f7fd72abc918753cec
[SCSI] qla2xxx: Code changes to support new dynamic logging infrastructure.
The code is changed to support the new dynamic logging infrastructure.
The following levels are added (default is 0 - no logging):

    0x40000000 - Module Init & Probe.
    0x20000000 - Mailbox Cmnds.
    0x10000000 - Device Discovery.
    0x08000000 - IO tracing.
    0x04000000 - DPC Thread.
    0x02000000 - Async events.
    0x01000000 - Timer routines.
    0x00800000 - User space.
    0x00400000 - Task Management.
    0x00200000 - AER/EEH.
    0x00100000 - Multi Q.
    0x00080000 - P3P Specific.
    0x00040000 - Virtual Port.
    0x00020000 - Buffer Dump.
    0x00010000 - Misc.
    0x7fffffff - Enables all logs (can produce a very large number of messages).

Setting the ql2xextended_error_logging module parameter to any of the above
values enables the debug output for that particular level. Do a LOGICAL OR of
the values to enable more than one level at once.

Signed-off-by: Saurav Kashyap <saurav.kashyap@qlogic.com>
Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: Joe Carnuccio <joe.carnuccio@qlogic.com>
Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com>
Signed-off-by: Madhuranath Iyengar <Madhu.Iyengar@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
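As a quick illustration of the "logical OR" rule above, here is a minimal,
hedged user-space sketch. It uses only the numeric values listed in the
commit message; the variable names and the tiny program itself are purely
illustrative and do not come from the driver.

    #include <stdio.h>

    int main(void)
    {
        /* Values taken from the level list in the commit message. */
        unsigned int disc = 0x10000000; /* Device Discovery */
        unsigned int user = 0x00800000; /* User space       */

        /* OR-ing the values enables both levels with one parameter setting. */
        printf("ql2xextended_error_logging=0x%08x\n", disc | user);
        return 0; /* prints ql2xextended_error_logging=0x10800000 */
    }

The resulting value would then be passed at load time, for example as
"modprobe qla2xxx ql2xextended_error_logging=0x10800000", or written to the
parameter's entry under /sys/module/qla2xxx/parameters/ if the driver
exports it as writable at runtime.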
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_attr.c')
-rw-r--r--	drivers/scsi/qla2xxx/qla_attr.c	183
1 file changed, 97 insertions(+), 86 deletions(-)
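Before the diff itself, a hedged sketch of the call-shape change the patch
applies throughout qla_attr.c: each DEBUG*()/qla_printk() call becomes a
ql_dbg() or ql_log() call that takes a category bit, the scsi_qla_host_t
pointer, and a unique message id, so individual areas can be switched on and
off at runtime through ql2xextended_error_logging. The mock below is a
user-space illustration only; ql_dbg_mock, QL_DBG_USER_MOCK, and the mapping
of 0x00800000 to the "User space" category are assumptions made for this
example, not driver code.

    #include <stdarg.h>
    #include <stdio.h>

    /* Stand-in for the module parameter; 0x00800000 is the "User space"
     * value from the level list above (assumed for this example). */
    static unsigned int ql2xextended_error_logging = 0x00800000;
    #define QL_DBG_USER_MOCK 0x00800000

    static void ql_dbg_mock(unsigned int level, unsigned int id,
        const char *fmt, ...)
    {
        va_list ap;

        if (!(ql2xextended_error_logging & level))
            return; /* category disabled at runtime: message dropped */

        printf("qla2xxx [0x%04x]: ", id);
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        /* Mirrors the shape of the calls added by this patch,
         * e.g. ql_dbg(ql_dbg_user, vha, 0x705b, "..."). */
        ql_dbg_mock(QL_DBG_USER_MOCK, 0x705b,
            "Firmware dump not supported for ISP82xx\n");
        return 0;
    }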
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 532313e0725e..7836eb01c7fc 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
 	int reading;
 
 	if (IS_QLA82XX(ha)) {
-		DEBUG2(qla_printk(KERN_INFO, ha,
-		    "Firmware dump not supported for ISP82xx\n"));
+		ql_dbg(ql_dbg_user, vha, 0x705b,
+		    "Firmware dump not supported for ISP82xx\n");
 		return count;
 	}
 
@@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
 		if (!ha->fw_dump_reading)
 			break;
 
-		qla_printk(KERN_INFO, ha,
+		ql_log(ql_log_info, vha, 0x705d,
 		    "Firmware dump cleared on (%ld).\n", vha->host_no);
 
 		ha->fw_dump_reading = 0;
@@ -66,7 +66,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
 		if (ha->fw_dumped && !ha->fw_dump_reading) {
 			ha->fw_dump_reading = 1;
 
-			qla_printk(KERN_INFO, ha,
+			ql_log(ql_log_info, vha, 0x705e,
 			    "Raw firmware dump ready for read on (%ld).\n",
 			    vha->host_no);
 		}
@@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
 	}
 
 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
+		ql_log(ql_log_warn, vha, 0x705f,
 		    "HBA not online, failing NVRAM update.\n");
 		return -EAGAIN;
 	}
@@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
 	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
 	    count);
 
+	ql_dbg(ql_dbg_user, vha, 0x7060,
+	    "Setting ISP_ABORT_NEEDED\n");
 	/* NVRAM settings take effect immediately. */
 	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 	qla2xxx_wake_dpc(vha);
@@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 
 		ha->optrom_state = QLA_SWAITING;
 
-		DEBUG2(qla_printk(KERN_INFO, ha,
+		ql_dbg(ql_dbg_user, vha, 0x7061,
 		    "Freeing flash region allocation -- 0x%x bytes.\n",
-		    ha->optrom_region_size));
+		    ha->optrom_region_size);
 
 		vfree(ha->optrom_buffer);
 		ha->optrom_buffer = NULL;
@@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		ha->optrom_state = QLA_SREADING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
 		if (ha->optrom_buffer == NULL) {
-			qla_printk(KERN_WARNING, ha,
+			ql_log(ql_log_warn, vha, 0x7062,
 			    "Unable to allocate memory for optrom retrieval "
 			    "(%x).\n", ha->optrom_region_size);
 
@@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		}
 
 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
-			qla_printk(KERN_WARNING, ha,
+			ql_log(ql_log_warn, vha, 0x7063,
 			    "HBA not online, failing NVRAM update.\n");
 			return -EAGAIN;
 		}
 
-		DEBUG2(qla_printk(KERN_INFO, ha,
+		ql_dbg(ql_dbg_user, vha, 0x7064,
 		    "Reading flash region -- 0x%x/0x%x.\n",
-		    ha->optrom_region_start, ha->optrom_region_size));
+		    ha->optrom_region_start, ha->optrom_region_size);
 
 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
 		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
@@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
 			valid = 1;
 		if (!valid) {
-			qla_printk(KERN_WARNING, ha,
+			ql_log(ql_log_warn, vha, 0x7065,
 			    "Invalid start region 0x%x/0x%x.\n", start, size);
 			return -EINVAL;
 		}
@@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		ha->optrom_state = QLA_SWRITING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
 		if (ha->optrom_buffer == NULL) {
-			qla_printk(KERN_WARNING, ha,
+			ql_log(ql_log_warn, vha, 0x7066,
 			    "Unable to allocate memory for optrom update "
-			    "(%x).\n", ha->optrom_region_size);
+			    "(%x)\n", ha->optrom_region_size);
 
 			ha->optrom_state = QLA_SWAITING;
 			return count;
 		}
 
-		DEBUG2(qla_printk(KERN_INFO, ha,
+		ql_dbg(ql_dbg_user, vha, 0x7067,
 		    "Staging flash region write -- 0x%x/0x%x.\n",
-		    ha->optrom_region_start, ha->optrom_region_size));
+		    ha->optrom_region_start, ha->optrom_region_size);
 
 		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
 		break;
@@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 			break;
 
 		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
-			qla_printk(KERN_WARNING, ha,
+			ql_log(ql_log_warn, vha, 0x7068,
 			    "HBA not online, failing flash update.\n");
 			return -EAGAIN;
 		}
 
-		DEBUG2(qla_printk(KERN_INFO, ha,
+		ql_dbg(ql_dbg_user, vha, 0x7069,
 		    "Writing flash region -- 0x%x/0x%x.\n",
-		    ha->optrom_region_start, ha->optrom_region_size));
+		    ha->optrom_region_start, ha->optrom_region_size);
 
 		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
 		    ha->optrom_region_start, ha->optrom_region_size);
@@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
 		return 0;
 
 	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
+		ql_log(ql_log_warn, vha, 0x706a,
 		    "HBA not online, failing VPD update.\n");
 		return -EAGAIN;
 	}
@@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
 
 	tmp_data = vmalloc(256);
 	if (!tmp_data) {
-		qla_printk(KERN_WARNING, ha,
+		ql_log(ql_log_warn, vha, 0x706b,
 		    "Unable to allocate memory for VPD information update.\n");
 		goto done;
 	}
@@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
 	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
 	    &ha->sfp_data_dma);
 	if (!ha->sfp_data) {
-		qla_printk(KERN_WARNING, ha,
+		ql_log(ql_log_warn, vha, 0x706c,
 		    "Unable to allocate memory for SFP read-data.\n");
 		return 0;
 	}
@@ -499,9 +501,10 @@ do_read:
 	rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
 	    addr, offset, SFP_BLOCK_SIZE, 0);
 	if (rval != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
+		ql_log(ql_log_warn, vha, 0x706d,
 		    "Unable to read SFP data (%x/%x/%x).\n", rval,
 		    addr, offset);
+
 		count = 0;
 		break;
 	}
@@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
 	type = simple_strtol(buf, NULL, 10);
 	switch (type) {
 	case 0x2025c:
-		qla_printk(KERN_INFO, ha,
-		    "Issuing ISP reset on (%ld).\n", vha->host_no);
+		ql_log(ql_log_info, vha, 0x706e,
+		    "Issuing ISP reset.\n");
 
 		scsi_block_requests(vha->host);
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
 		if (!IS_QLA81XX(ha))
 			break;
 
-		qla_printk(KERN_INFO, ha,
-		    "Issuing MPI reset on (%ld).\n", vha->host_no);
+		ql_log(ql_log_info, vha, 0x706f,
+		    "Issuing MPI reset.\n");
 
 		/* Make sure FC side is not in reset */
 		qla2x00_wait_for_hba_online(vha);
@@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
 		/* Issue MPI reset */
 		scsi_block_requests(vha->host);
 		if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
-			qla_printk(KERN_WARNING, ha,
-			    "MPI reset failed on (%ld).\n", vha->host_no);
+			ql_log(ql_log_warn, vha, 0x7070,
+			    "MPI reset failed.\n");
 		scsi_unblock_requests(vha->host);
 		break;
 	case 0x2025e:
 		if (!IS_QLA82XX(ha) || vha != base_vha) {
-			qla_printk(KERN_INFO, ha,
-			    "FCoE ctx reset not supported for host%ld.\n",
-			    vha->host_no);
+			ql_log(ql_log_info, vha, 0x7071,
+			    "FCoE ctx reset no supported.\n");
 			return count;
 		}
 
-		qla_printk(KERN_INFO, ha,
-		    "Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
+		ql_log(ql_log_info, vha, 0x7072,
+		    "Issuing FCoE ctx reset.\n");
 		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
 		qla2xxx_wake_dpc(vha);
 		qla2x00_wait_for_fcoe_ctx_reset(vha);
@@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
 		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
 		    &ha->edc_data_dma);
 		if (!ha->edc_data) {
-			DEBUG2(qla_printk(KERN_INFO, ha,
-			    "Unable to allocate memory for EDC write.\n"));
+			ql_log(ql_log_warn, vha, 0x7073,
+			    "Unable to allocate memory for EDC write.\n");
 			return 0;
 		}
 	}
@@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
 	rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
 	    dev, adr, len, opt);
 	if (rval != QLA_SUCCESS) {
-		DEBUG2(qla_printk(KERN_INFO, ha,
-		    "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
-		    rval, dev, adr, opt, len, buf[8]));
+		ql_log(ql_log_warn, vha, 0x7074,
+		    "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
+		    rval, dev, adr, opt, len, buf[8]);
 		return 0;
 	}
 
@@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
 		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
 		    &ha->edc_data_dma);
 		if (!ha->edc_data) {
-			DEBUG2(qla_printk(KERN_INFO, ha,
-			    "Unable to allocate memory for EDC status.\n"));
+			ql_log(ql_log_warn, vha, 0x708c,
+			    "Unable to allocate memory for EDC status.\n");
 			return 0;
 		}
 	}
@@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
 	rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
 	    dev, adr, len, opt);
 	if (rval != QLA_SUCCESS) {
-		DEBUG2(qla_printk(KERN_INFO, ha,
-		    "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
-		    rval, dev, adr, opt, len));
+		ql_log(ql_log_info, vha, 0x7075,
+		    "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
+		    rval, dev, adr, opt, len);
 		return 0;
 	}
 
@@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
 	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
 	    &ha->xgmac_data_dma, GFP_KERNEL);
 	if (!ha->xgmac_data) {
-		qla_printk(KERN_WARNING, ha,
+		ql_log(ql_log_warn, vha, 0x7076,
 		    "Unable to allocate memory for XGMAC read-data.\n");
 		return 0;
 	}
@@ -761,7 +763,7 @@ do_read:
 	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
 	    XGMAC_DATA_SIZE, &actual_size);
 	if (rval != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
+		ql_log(ql_log_warn, vha, 0x7077,
 		    "Unable to read XGMAC data (%x).\n", rval);
 		count = 0;
 	}
@@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
 	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
 	    &ha->dcbx_tlv_dma, GFP_KERNEL);
 	if (!ha->dcbx_tlv) {
-		qla_printk(KERN_WARNING, ha,
+		ql_log(ql_log_warn, vha, 0x7078,
 		    "Unable to allocate memory for DCBX TLV read-data.\n");
 		return 0;
 	}
@@ -813,8 +815,8 @@ do_read:
 	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
 	    DCBX_TLV_DATA_SIZE);
 	if (rval != QLA_SUCCESS) {
-		qla_printk(KERN_WARNING, ha,
-		    "Unable to read DCBX TLV data (%x).\n", rval);
+		ql_log(ql_log_warn, vha, 0x7079,
+		    "Unable to read DCBX TLV (%x).\n", rval);
 		count = 0;
 	}
 
@@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
 		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
 		    iter->attr);
 		if (ret)
-			qla_printk(KERN_INFO, vha->hw,
-			    "Unable to create sysfs %s binary attribute "
-			    "(%d).\n", iter->name, ret);
+			ql_log(ql_log_warn, vha, 0x00f3,
+			    "Unable to create sysfs %s binary attribute (%d).\n",
+			    iter->name, ret);
+		else
+			ql_dbg(ql_dbg_init, vha, 0x00f4,
+			    "Successfully created sysfs %s binary attribure.\n",
+			    iter->name);
 	}
 }
 
@@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
 		return -EPERM;
 
 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
-		qla_printk(KERN_WARNING, ha,
+		ql_log(ql_log_warn, vha, 0x707a,
 		    "Abort ISP active -- ignoring beacon request.\n");
 		return -EBUSY;
 	}
@@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev,
 	temp = frac = 0;
 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
 	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
-		DEBUG2_3_11(printk(KERN_WARNING
-		    "%s(%ld): isp reset in progress.\n",
-		    __func__, vha->host_no));
+		ql_log(ql_log_warn, vha, 0x707b,
+		    "ISP reset active.\n");
 	else if (!vha->hw->flags.eeh_busy)
 		rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
 	if (rval != QLA_SUCCESS)
@@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
 
 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
 	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
-		DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n",
-		    __func__, vha->host_no));
+		ql_log(ql_log_warn, vha, 0x707c,
+		    "ISP reset active.\n");
 	else if (!vha->hw->flags.eeh_busy)
 		rval = qla2x00_get_firmware_state(vha, state);
 	if (rval != QLA_SUCCESS)
@@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
 
 	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
 	if (stats == NULL) {
-		DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
-		    __func__, base_vha->host_no));
+		ql_log(ql_log_warn, vha, 0x707d,
+		    "Failed to allocate memory for stats.\n");
 		goto done;
 	}
 	memset(stats, 0, DMA_POOL_SIZE);
@@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 
 	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
 	if (ret) {
-		DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
-		    "status %x\n", ret));
+		ql_log(ql_log_warn, vha, 0x707e,
+		    "Vport sanity check failed, status %x\n", ret);
 		return (ret);
 	}
 
 	vha = qla24xx_create_vhost(fc_vport);
 	if (vha == NULL) {
-		DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
-		    vha));
+		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
 		return FC_VPORT_FAILED;
 	}
 	if (disable) {
@@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 	atomic_set(&vha->vp_state, VP_FAILED);
 
 	/* ready to create vport */
-	qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
-	    vha->vp_idx);
+	ql_log(ql_log_info, vha, 0x7080,
+	    "VP entry id %d assigned.\n", vha->vp_idx);
 
 	/* initialized vport states */
 	atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1775,8 +1779,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
 	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 		/* Don't retry or attempt login of this virtual port */
-		DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
-		    base_vha->host_no));
+		ql_dbg(ql_dbg_user, vha, 0x7081,
+		    "Vport loop state is not UP.\n");
 		atomic_set(&vha->loop_state, LOOP_DEAD);
 		if (!disable)
 			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1785,9 +1789,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
 		if (ha->fw_attributes & BIT_4) {
 			vha->flags.difdix_supported = 1;
-			DEBUG18(qla_printk(KERN_INFO, ha,
-			    "Registering for DIF/DIX type 1 and 3"
-			    " protection.\n"));
+			ql_dbg(ql_dbg_user, vha, 0x7082,
+			    "Registered for DIF/DIX type 1 and 3 protection.\n");
 			scsi_host_set_prot(vha->host,
 			    SHOST_DIF_TYPE1_PROTECTION
 			    | SHOST_DIF_TYPE2_PROTECTION
@@ -1802,8 +1805,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 
 	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
 	    &ha->pdev->dev)) {
-		DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
-		    vha->host_no, vha->vp_idx));
+		ql_dbg(ql_dbg_user, vha, 0x7083,
+		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
 		goto vport_create_failed_2;
 	}
 
@@ -1820,6 +1823,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 
 	if (ha->flags.cpu_affinity_enabled) {
 		req = ha->req_q_map[1];
+		ql_dbg(ql_dbg_multiq, vha, 0xc000,
+		    "Request queue %p attached with "
+		    "VP[%d], cpu affinity =%d\n",
+		    req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
 		goto vport_queue;
 	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
 		goto vport_queue;
@@ -1836,13 +1843,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
 		    qos);
 		if (!ret)
-			qla_printk(KERN_WARNING, ha,
-			    "Can't create request queue for vp_idx:%d\n",
+			ql_log(ql_log_warn, vha, 0x7084,
+			    "Can't create request queue for VP[%d]\n",
 			    vha->vp_idx);
 		else {
-			DEBUG2(qla_printk(KERN_INFO, ha,
-			    "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
-			    ret, qos, vha->vp_idx));
+			ql_dbg(ql_dbg_multiq, vha, 0xc001,
+			    "Request Que:%d Q0s: %d) created for VP[%d]\n",
+			    ret, qos, vha->vp_idx);
+			ql_dbg(ql_dbg_user, vha, 0x7085,
+			    "Request Que:%d Q0s: %d) created for VP[%d]\n",
+			    ret, qos, vha->vp_idx);
 			req = ha->req_q_map[ret];
 		}
 	}
@@ -1882,12 +1892,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
 	if (vha->timer_active) {
 		qla2x00_vp_stop_timer(vha);
-		DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
-		    " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+		ql_dbg(ql_dbg_user, vha, 0x7086,
+		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
 	}
 
 	/* No pending activities shall be there on the vha now */
-	DEBUG(msleep(random32()%10)); /* Just to see if something falls on
+	if (ql2xextended_error_logging & ql_dbg_user)
+		msleep(random32()%10); /* Just to see if something falls on
 	    * the net we have placed below */
 
 	BUG_ON(atomic_read(&vha->vref_count));
@@ -1901,12 +1912,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 
 	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
 		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
-			qla_printk(KERN_WARNING, ha,
+			ql_log(ql_log_warn, vha, 0x7087,
 			    "Queue delete failed.\n");
 	}
 
 	scsi_host_put(vha->host);
-	qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
+	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
 	return 0;
 }
 