aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/qla2xxx/qla_os.c
diff options
context:
space:
mode:
authorSaurav Kashyap <saurav.kashyap@qlogic.com>2011-07-14 15:00:13 -0400
committerJames Bottomley <JBottomley@Parallels.com>2011-07-27 06:16:17 -0400
commit7c3df1320e5e875478775e78d01a09aee96b8abe (patch)
tree215326b999b3db03f4a2268a79c3848803daaf7d /drivers/scsi/qla2xxx/qla_os.c
parent3ce8866ceae87258cf66d1f7fd72abc918753cec (diff)
[SCSI] qla2xxx: Code changes to support new dynamic logging infrastructure.
The code is changed to support the new dynamic logging infrastructure. Following are the levels added. Default is 0 - no logging. 0x40000000 - Module Init & Probe. 0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery. 0x08000000 - IO tracing. 0x04000000 - DPC Thread. 0x02000000 - Async events. 0x01000000 - Timer routines. 0x00800000 - User space. 0x00400000 - Task Management. 0x00200000 - AER/EEH. 0x00100000 - Multi Q. 0x00080000 - P3P Specific. 0x00040000 - Virtual Port. 0x00020000 - Buffer Dump. 0x00010000 - Misc. 0x7fffffff - For enabling all logs, can be too many logs. Setting ql2xextended_error_logging module parameter to any of the above value, will enable the debug for that particular level. Do LOGICAL OR of the value to enable more than one level. Signed-off-by: Saurav Kashyap <saurav.kashyap@qlogic.com> Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com> Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com> Signed-off-by: Joe Carnuccio <joe.carnuccio@qlogic.com> Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com> Signed-off-by: Madhuranath Iyengar <Madhu.Iyengar@qlogic.com> Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c730
1 files changed, 459 insertions, 271 deletions
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8aa05c87b65..e02df276804 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -141,8 +141,8 @@ MODULE_PARM_DESC(ql2xmultique_tag,
141int ql2xfwloadbin; 141int ql2xfwloadbin;
142module_param(ql2xfwloadbin, int, S_IRUGO); 142module_param(ql2xfwloadbin, int, S_IRUGO);
143MODULE_PARM_DESC(ql2xfwloadbin, 143MODULE_PARM_DESC(ql2xfwloadbin,
144 "Option to specify location from which to load ISP firmware:\n" 144 "Option to specify location from which to load ISP firmware:.\n"
145 " 2 -- load firmware via the request_firmware() (hotplug)\n" 145 " 2 -- load firmware via the request_firmware() (hotplug).\n"
146 " interface.\n" 146 " interface.\n"
147 " 1 -- load firmware from flash.\n" 147 " 1 -- load firmware from flash.\n"
148 " 0 -- use default semantics.\n"); 148 " 0 -- use default semantics.\n");
@@ -156,7 +156,7 @@ MODULE_PARM_DESC(ql2xetsenable,
156int ql2xdbwr = 1; 156int ql2xdbwr = 1;
157module_param(ql2xdbwr, int, S_IRUGO); 157module_param(ql2xdbwr, int, S_IRUGO);
158MODULE_PARM_DESC(ql2xdbwr, 158MODULE_PARM_DESC(ql2xdbwr,
159 "Option to specify scheme for request queue posting\n" 159 "Option to specify scheme for request queue posting.\n"
160 " 0 -- Regular doorbell.\n" 160 " 0 -- Regular doorbell.\n"
161 " 1 -- CAMRAM doorbell (faster).\n"); 161 " 1 -- CAMRAM doorbell (faster).\n");
162 162
@@ -181,7 +181,7 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
181int ql2xdontresethba; 181int ql2xdontresethba;
182module_param(ql2xdontresethba, int, S_IRUGO); 182module_param(ql2xdontresethba, int, S_IRUGO);
183MODULE_PARM_DESC(ql2xdontresethba, 183MODULE_PARM_DESC(ql2xdontresethba,
184 "Option to specify reset behaviour\n" 184 "Option to specify reset behaviour.\n"
185 " 0 (Default) -- Reset on failure.\n" 185 " 0 (Default) -- Reset on failure.\n"
186 " 1 -- Do not reset on failure.\n"); 186 " 1 -- Do not reset on failure.\n");
187 187
@@ -260,8 +260,11 @@ static inline void
260qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 260qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
261{ 261{
262 /* Currently used for 82XX only. */ 262 /* Currently used for 82XX only. */
263 if (vha->device_flags & DFLG_DEV_FAILED) 263 if (vha->device_flags & DFLG_DEV_FAILED) {
264 ql_dbg(ql_dbg_timer, vha, 0x600d,
265 "Device in a failed state, returning.\n");
264 return; 266 return;
267 }
265 268
266 mod_timer(&vha->timer, jiffies + interval * HZ); 269 mod_timer(&vha->timer, jiffies + interval * HZ);
267} 270}
@@ -286,19 +289,20 @@ static void qla2x00_sp_free_dma(srb_t *);
286/* -------------------------------------------------------------------------- */ 289/* -------------------------------------------------------------------------- */
287static int qla2x00_alloc_queues(struct qla_hw_data *ha) 290static int qla2x00_alloc_queues(struct qla_hw_data *ha)
288{ 291{
292 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
289 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, 293 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
290 GFP_KERNEL); 294 GFP_KERNEL);
291 if (!ha->req_q_map) { 295 if (!ha->req_q_map) {
292 qla_printk(KERN_WARNING, ha, 296 ql_log(ql_log_fatal, vha, 0x003b,
293 "Unable to allocate memory for request queue ptrs\n"); 297 "Unable to allocate memory for request queue ptrs.\n");
294 goto fail_req_map; 298 goto fail_req_map;
295 } 299 }
296 300
297 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, 301 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
298 GFP_KERNEL); 302 GFP_KERNEL);
299 if (!ha->rsp_q_map) { 303 if (!ha->rsp_q_map) {
300 qla_printk(KERN_WARNING, ha, 304 ql_log(ql_log_fatal, vha, 0x003c,
301 "Unable to allocate memory for response queue ptrs\n"); 305 "Unable to allocate memory for response queue ptrs.\n");
302 goto fail_rsp_map; 306 goto fail_rsp_map;
303 } 307 }
304 set_bit(0, ha->rsp_qid_map); 308 set_bit(0, ha->rsp_qid_map);
@@ -362,8 +366,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
362 struct qla_hw_data *ha = vha->hw; 366 struct qla_hw_data *ha = vha->hw;
363 367
364 if (!(ha->fw_attributes & BIT_6)) { 368 if (!(ha->fw_attributes & BIT_6)) {
365 qla_printk(KERN_INFO, ha, 369 ql_log(ql_log_warn, vha, 0x00d8,
366 "Firmware is not multi-queue capable\n"); 370 "Firmware is not multi-queue capable.\n");
367 goto fail; 371 goto fail;
368 } 372 }
369 if (ql2xmultique_tag) { 373 if (ql2xmultique_tag) {
@@ -372,8 +376,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
372 req = qla25xx_create_req_que(ha, options, 0, 0, -1, 376 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
373 QLA_DEFAULT_QUE_QOS); 377 QLA_DEFAULT_QUE_QOS);
374 if (!req) { 378 if (!req) {
375 qla_printk(KERN_WARNING, ha, 379 ql_log(ql_log_warn, vha, 0x00e0,
376 "Can't create request queue\n"); 380 "Failed to create request queue.\n");
377 goto fail; 381 goto fail;
378 } 382 }
379 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); 383 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
@@ -382,17 +386,20 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
382 for (ques = 1; ques < ha->max_rsp_queues; ques++) { 386 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
383 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req); 387 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
384 if (!ret) { 388 if (!ret) {
385 qla_printk(KERN_WARNING, ha, 389 ql_log(ql_log_warn, vha, 0x00e8,
386 "Response Queue create failed\n"); 390 "Failed to create response queue.\n");
387 goto fail2; 391 goto fail2;
388 } 392 }
389 } 393 }
390 ha->flags.cpu_affinity_enabled = 1; 394 ha->flags.cpu_affinity_enabled = 1;
391 395 ql_dbg(ql_dbg_multiq, vha, 0xc007,
392 DEBUG2(qla_printk(KERN_INFO, ha, 396 "CPU affinity mode enabled, "
393 "CPU affinity mode enabled, no. of response" 397 "no. of response queues:%d no. of request queues:%d.\n",
394 " queues:%d, no. of request queues:%d\n", 398 ha->max_rsp_queues, ha->max_req_queues);
395 ha->max_rsp_queues, ha->max_req_queues)); 399 ql_dbg(ql_dbg_init, vha, 0x00e9,
400 "CPU affinity mode enabled, "
401 "no. of response queues:%d no. of request queues:%d.\n",
402 ha->max_rsp_queues, ha->max_req_queues);
396 } 403 }
397 return 0; 404 return 0;
398fail2: 405fail2:
@@ -539,8 +546,11 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
539 struct qla_hw_data *ha = vha->hw; 546 struct qla_hw_data *ha = vha->hw;
540 547
541 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 548 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
542 if (!sp) 549 if (!sp) {
550 ql_log(ql_log_warn, vha, 0x3006,
551 "Memory allocation failed for sp.\n");
543 return sp; 552 return sp;
553 }
544 554
545 atomic_set(&sp->ref_count, 1); 555 atomic_set(&sp->ref_count, 1);
546 sp->fcport = fcport; 556 sp->fcport = fcport;
@@ -564,30 +574,43 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
564 int rval; 574 int rval;
565 575
566 if (ha->flags.eeh_busy) { 576 if (ha->flags.eeh_busy) {
567 if (ha->flags.pci_channel_io_perm_failure) 577 if (ha->flags.pci_channel_io_perm_failure) {
578 ql_dbg(ql_dbg_io, vha, 0x3001,
579 "PCI Channel IO permanent failure, exiting "
580 "cmd=%p.\n", cmd);
568 cmd->result = DID_NO_CONNECT << 16; 581 cmd->result = DID_NO_CONNECT << 16;
569 else 582 } else {
583 ql_dbg(ql_dbg_io, vha, 0x3002,
584 "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
570 cmd->result = DID_REQUEUE << 16; 585 cmd->result = DID_REQUEUE << 16;
586 }
571 goto qc24_fail_command; 587 goto qc24_fail_command;
572 } 588 }
573 589
574 rval = fc_remote_port_chkready(rport); 590 rval = fc_remote_port_chkready(rport);
575 if (rval) { 591 if (rval) {
576 cmd->result = rval; 592 cmd->result = rval;
593 ql_dbg(ql_dbg_io, vha, 0x3003,
594 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
595 cmd, rval);
577 goto qc24_fail_command; 596 goto qc24_fail_command;
578 } 597 }
579 598
580 if (!vha->flags.difdix_supported && 599 if (!vha->flags.difdix_supported &&
581 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 600 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
582 DEBUG2(qla_printk(KERN_ERR, ha, 601 ql_dbg(ql_dbg_io, vha, 0x3004,
583 "DIF Cap Not Reg, fail DIF capable cmd's:%x\n", 602 "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
584 cmd->cmnd[0])); 603 cmd);
585 cmd->result = DID_NO_CONNECT << 16; 604 cmd->result = DID_NO_CONNECT << 16;
586 goto qc24_fail_command; 605 goto qc24_fail_command;
587 } 606 }
588 if (atomic_read(&fcport->state) != FCS_ONLINE) { 607 if (atomic_read(&fcport->state) != FCS_ONLINE) {
589 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 608 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
590 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 609 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
610 ql_dbg(ql_dbg_io, vha, 0x3005,
611 "Returning DNC, fcport_state=%d loop_state=%d.\n",
612 atomic_read(&fcport->state),
613 atomic_read(&base_vha->loop_state));
591 cmd->result = DID_NO_CONNECT << 16; 614 cmd->result = DID_NO_CONNECT << 16;
592 goto qc24_fail_command; 615 goto qc24_fail_command;
593 } 616 }
@@ -599,8 +622,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
599 goto qc24_host_busy; 622 goto qc24_host_busy;
600 623
601 rval = ha->isp_ops->start_scsi(sp); 624 rval = ha->isp_ops->start_scsi(sp);
602 if (rval != QLA_SUCCESS) 625 if (rval != QLA_SUCCESS) {
626 ql_dbg(ql_dbg_io, vha, 0x3013,
627 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
603 goto qc24_host_busy_free_sp; 628 goto qc24_host_busy_free_sp;
629 }
604 630
605 return 0; 631 return 0;
606 632
@@ -643,7 +669,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
643 int ret = QLA_SUCCESS; 669 int ret = QLA_SUCCESS;
644 670
645 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { 671 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
646 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n")); 672 ql_dbg(ql_dbg_taskm, vha, 0x8005,
673 "Return:eh_wait.\n");
647 return ret; 674 return ret;
648 } 675 }
649 676
@@ -736,7 +763,8 @@ qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
736 else 763 else
737 return_status = QLA_FUNCTION_FAILED; 764 return_status = QLA_FUNCTION_FAILED;
738 765
739 DEBUG2(printk("%s return_status=%d\n", __func__, return_status)); 766 ql_dbg(ql_dbg_taskm, vha, 0x8019,
767 "%s return status=%d.\n", __func__, return_status);
740 768
741 return return_status; 769 return return_status;
742} 770}
@@ -844,10 +872,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
844 int wait = 0; 872 int wait = 0;
845 struct qla_hw_data *ha = vha->hw; 873 struct qla_hw_data *ha = vha->hw;
846 874
875 ql_dbg(ql_dbg_taskm, vha, 0x8000,
876 "Entered %s for cmd=%p.\n", __func__, cmd);
847 if (!CMD_SP(cmd)) 877 if (!CMD_SP(cmd))
848 return SUCCESS; 878 return SUCCESS;
849 879
850 ret = fc_block_scsi_eh(cmd); 880 ret = fc_block_scsi_eh(cmd);
881 ql_dbg(ql_dbg_taskm, vha, 0x8001,
882 "Return value of fc_block_scsi_eh=%d.\n", ret);
851 if (ret != 0) 883 if (ret != 0)
852 return ret; 884 return ret;
853 ret = SUCCESS; 885 ret = SUCCESS;
@@ -862,20 +894,19 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
862 return SUCCESS; 894 return SUCCESS;
863 } 895 }
864 896
865 DEBUG2(printk("%s(%ld): aborting sp %p from RISC.", 897 ql_dbg(ql_dbg_taskm, vha, 0x8002,
866 __func__, vha->host_no, sp)); 898 "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
867 899
868 /* Get a reference to the sp and drop the lock.*/ 900 /* Get a reference to the sp and drop the lock.*/
869 sp_get(sp); 901 sp_get(sp);
870 902
871 spin_unlock_irqrestore(&ha->hardware_lock, flags); 903 spin_unlock_irqrestore(&ha->hardware_lock, flags);
872 if (ha->isp_ops->abort_command(sp)) { 904 if (ha->isp_ops->abort_command(sp)) {
873 DEBUG2(printk("%s(%ld): abort_command " 905 ql_dbg(ql_dbg_taskm, vha, 0x8003,
874 "mbx failed.\n", __func__, vha->host_no)); 906 "Abort command mbx failed for cmd=%p.\n", cmd);
875 ret = FAILED;
876 } else { 907 } else {
877 DEBUG3(printk("%s(%ld): abort_command " 908 ql_dbg(ql_dbg_taskm, vha, 0x8004,
878 "mbx success.\n", __func__, vha->host_no)); 909 "Abort command mbx success.\n");
879 wait = 1; 910 wait = 1;
880 } 911 }
881 qla2x00_sp_compl(ha, sp); 912 qla2x00_sp_compl(ha, sp);
@@ -883,16 +914,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
883 /* Wait for the command to be returned. */ 914 /* Wait for the command to be returned. */
884 if (wait) { 915 if (wait) {
885 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { 916 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
886 qla_printk(KERN_ERR, ha, 917 ql_log(ql_log_warn, vha, 0x8006,
887 "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n", 918 "Abort handler timed out for cmd=%p.\n", cmd);
888 vha->host_no, id, lun, ret);
889 ret = FAILED; 919 ret = FAILED;
890 } 920 }
891 } 921 }
892 922
893 qla_printk(KERN_INFO, ha, 923 ql_log(ql_log_info, vha, 0x801c,
894 "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n", 924 "Abort command issued -- %d %x.\n", wait, ret);
895 vha->host_no, id, lun, wait, ret);
896 925
897 return ret; 926 return ret;
898} 927}
@@ -960,40 +989,59 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
960 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 989 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
961 int err; 990 int err;
962 991
963 if (!fcport) 992 if (!fcport) {
993 ql_log(ql_log_warn, vha, 0x8007,
994 "fcport is NULL.\n");
964 return FAILED; 995 return FAILED;
996 }
965 997
966 err = fc_block_scsi_eh(cmd); 998 err = fc_block_scsi_eh(cmd);
999 ql_dbg(ql_dbg_taskm, vha, 0x8008,
1000 "fc_block_scsi_eh ret=%d.\n", err);
967 if (err != 0) 1001 if (err != 0)
968 return err; 1002 return err;
969 1003
970 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 1004 ql_log(ql_log_info, vha, 0x8009,
971 vha->host_no, cmd->device->id, cmd->device->lun, name); 1005 "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
1006 cmd->device->id, cmd->device->lun, cmd);
972 1007
973 err = 0; 1008 err = 0;
974 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1009 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1010 ql_log(ql_log_warn, vha, 0x800a,
1011 "Wait for hba online failed for cmd=%p.\n", cmd);
975 goto eh_reset_failed; 1012 goto eh_reset_failed;
1013 }
976 err = 1; 1014 err = 1;
977 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 1015 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
1016 ql_log(ql_log_warn, vha, 0x800b,
1017 "Wait for loop ready failed for cmd=%p.\n", cmd);
978 goto eh_reset_failed; 1018 goto eh_reset_failed;
1019 }
979 err = 2; 1020 err = 2;
980 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) 1021 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
981 != QLA_SUCCESS) 1022 != QLA_SUCCESS) {
1023 ql_log(ql_log_warn, vha, 0x800c,
1024 "do_reset failed for cmd=%p.\n", cmd);
982 goto eh_reset_failed; 1025 goto eh_reset_failed;
1026 }
983 err = 3; 1027 err = 3;
984 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 1028 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
985 cmd->device->lun, type) != QLA_SUCCESS) 1029 cmd->device->lun, type) != QLA_SUCCESS) {
1030 ql_log(ql_log_warn, vha, 0x800d,
1031 "wait for pending cmds failed for cmd=%p.\n", cmd);
986 goto eh_reset_failed; 1032 goto eh_reset_failed;
1033 }
987 1034
988 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 1035 ql_log(ql_log_info, vha, 0x800e,
989 vha->host_no, cmd->device->id, cmd->device->lun, name); 1036 "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
1037 cmd->device->id, cmd->device->lun, cmd);
990 1038
991 return SUCCESS; 1039 return SUCCESS;
992 1040
993eh_reset_failed: 1041eh_reset_failed:
994 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n" 1042 ql_log(ql_log_info, vha, 0x800f,
995 , vha->host_no, cmd->device->id, cmd->device->lun, name, 1043 "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
996 reset_errors[err]); 1044 reset_errors[err], cmd->device->id, cmd->device->lun);
997 return FAILED; 1045 return FAILED;
998} 1046}
999 1047
@@ -1043,19 +1091,25 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1043 id = cmd->device->id; 1091 id = cmd->device->id;
1044 lun = cmd->device->lun; 1092 lun = cmd->device->lun;
1045 1093
1046 if (!fcport) 1094 if (!fcport) {
1095 ql_log(ql_log_warn, vha, 0x8010,
1096 "fcport is NULL.\n");
1047 return ret; 1097 return ret;
1098 }
1048 1099
1049 ret = fc_block_scsi_eh(cmd); 1100 ret = fc_block_scsi_eh(cmd);
1101 ql_dbg(ql_dbg_taskm, vha, 0x8011,
1102 "fc_block_scsi_eh ret=%d.\n", ret);
1050 if (ret != 0) 1103 if (ret != 0)
1051 return ret; 1104 return ret;
1052 ret = FAILED; 1105 ret = FAILED;
1053 1106
1054 qla_printk(KERN_INFO, vha->hw, 1107 ql_log(ql_log_info, vha, 0x8012,
1055 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun); 1108 "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
1056 1109
1057 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1110 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1058 DEBUG2(printk("%s failed:board disabled\n",__func__)); 1111 ql_log(ql_log_fatal, vha, 0x8013,
1112 "Wait for hba online failed board disabled.\n");
1059 goto eh_bus_reset_done; 1113 goto eh_bus_reset_done;
1060 } 1114 }
1061 1115
@@ -1068,12 +1122,15 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1068 1122
1069 /* Flush outstanding commands. */ 1123 /* Flush outstanding commands. */
1070 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != 1124 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1071 QLA_SUCCESS) 1125 QLA_SUCCESS) {
1126 ql_log(ql_log_warn, vha, 0x8014,
1127 "Wait for pending commands failed.\n");
1072 ret = FAILED; 1128 ret = FAILED;
1129 }
1073 1130
1074eh_bus_reset_done: 1131eh_bus_reset_done:
1075 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__, 1132 ql_log(ql_log_warn, vha, 0x802b,
1076 (ret == FAILED) ? "failed" : "succeeded"); 1133 "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEEDED");
1077 1134
1078 return ret; 1135 return ret;
1079} 1136}
@@ -1106,16 +1163,21 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1106 id = cmd->device->id; 1163 id = cmd->device->id;
1107 lun = cmd->device->lun; 1164 lun = cmd->device->lun;
1108 1165
1109 if (!fcport) 1166 if (!fcport) {
1167 ql_log(ql_log_warn, vha, 0x8016,
1168 "fcport is NULL.\n");
1110 return ret; 1169 return ret;
1170 }
1111 1171
1112 ret = fc_block_scsi_eh(cmd); 1172 ret = fc_block_scsi_eh(cmd);
1173 ql_dbg(ql_dbg_taskm, vha, 0x8017,
1174 "fc_block_scsi_eh ret=%d.\n", ret);
1113 if (ret != 0) 1175 if (ret != 0)
1114 return ret; 1176 return ret;
1115 ret = FAILED; 1177 ret = FAILED;
1116 1178
1117 qla_printk(KERN_INFO, ha, 1179 ql_log(ql_log_info, vha, 0x8018,
1118 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun); 1180 "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
1119 1181
1120 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) 1182 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
1121 goto eh_host_reset_lock; 1183 goto eh_host_reset_lock;
@@ -1150,8 +1212,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1150 /* failed. schedule dpc to try */ 1212 /* failed. schedule dpc to try */
1151 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 1213 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1152 1214
1153 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1215 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1216 ql_log(ql_log_warn, vha, 0x802a,
1217 "wait for hba online failed.\n");
1154 goto eh_host_reset_lock; 1218 goto eh_host_reset_lock;
1219 }
1155 } 1220 }
1156 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1221 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1157 } 1222 }
@@ -1162,7 +1227,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1162 ret = SUCCESS; 1227 ret = SUCCESS;
1163 1228
1164eh_host_reset_lock: 1229eh_host_reset_lock:
1165 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1230 qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
1166 (ret == FAILED) ? "failed" : "succeeded"); 1231 (ret == FAILED) ? "failed" : "succeeded");
1167 1232
1168 return ret; 1233 return ret;
@@ -1192,9 +1257,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1192 1257
1193 ret = ha->isp_ops->target_reset(fcport, 0, 0); 1258 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1194 if (ret != QLA_SUCCESS) { 1259 if (ret != QLA_SUCCESS) {
1195 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1260 ql_dbg(ql_dbg_taskm, vha, 0x802c,
1196 "target_reset=%d d_id=%x.\n", __func__, 1261 "Bus Reset failed: Target Reset=%d "
1197 vha->host_no, ret, fcport->d_id.b24)); 1262 "d_id=%x.\n", ret, fcport->d_id.b24);
1198 } 1263 }
1199 } 1264 }
1200 } 1265 }
@@ -1202,9 +1267,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1202 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) { 1267 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
1203 ret = qla2x00_full_login_lip(vha); 1268 ret = qla2x00_full_login_lip(vha);
1204 if (ret != QLA_SUCCESS) { 1269 if (ret != QLA_SUCCESS) {
1205 DEBUG2_3(printk("%s(%ld): failed: " 1270 ql_dbg(ql_dbg_taskm, vha, 0x802d,
1206 "full_login_lip=%d.\n", __func__, vha->host_no, 1271 "full_login_lip=%d.\n", ret);
1207 ret));
1208 } 1272 }
1209 atomic_set(&vha->loop_state, LOOP_DOWN); 1273 atomic_set(&vha->loop_state, LOOP_DOWN);
1210 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1274 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1215,8 +1279,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1215 if (ha->flags.enable_lip_reset) { 1279 if (ha->flags.enable_lip_reset) {
1216 ret = qla2x00_lip_reset(vha); 1280 ret = qla2x00_lip_reset(vha);
1217 if (ret != QLA_SUCCESS) { 1281 if (ret != QLA_SUCCESS) {
1218 DEBUG2_3(printk("%s(%ld): failed: " 1282 ql_dbg(ql_dbg_taskm, vha, 0x802e,
1219 "lip_reset=%d.\n", __func__, vha->host_no, ret)); 1283 "lip_reset failed (%d).\n", ret);
1220 } else 1284 } else
1221 qla2x00_wait_for_loop_ready(vha); 1285 qla2x00_wait_for_loop_ready(vha);
1222 } 1286 }
@@ -1315,17 +1379,17 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1315 if (!scsi_track_queue_full(sdev, qdepth)) 1379 if (!scsi_track_queue_full(sdev, qdepth))
1316 return; 1380 return;
1317 1381
1318 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw, 1382 ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
1319 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", 1383 "Queue depth adjusted-down "
1320 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1384 "to %d for scsi(%ld:%d:%d:%d).\n",
1321 sdev->queue_depth)); 1385 sdev->queue_depth, fcport->vha->host_no,
1386 sdev->channel, sdev->id, sdev->lun);
1322} 1387}
1323 1388
1324static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) 1389static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1325{ 1390{
1326 fc_port_t *fcport = sdev->hostdata; 1391 fc_port_t *fcport = sdev->hostdata;
1327 struct scsi_qla_host *vha = fcport->vha; 1392 struct scsi_qla_host *vha = fcport->vha;
1328 struct qla_hw_data *ha = vha->hw;
1329 struct req_que *req = NULL; 1393 struct req_que *req = NULL;
1330 1394
1331 req = vha->req; 1395 req = vha->req;
@@ -1340,10 +1404,11 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1340 else 1404 else
1341 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); 1405 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1342 1406
1343 DEBUG2(qla_printk(KERN_INFO, ha, 1407 ql_dbg(ql_dbg_io, vha, 0x302a,
1344 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 1408 "Queue depth adjusted-up to %d for "
1345 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1409 "scsi(%ld:%d:%d:%d).\n",
1346 sdev->queue_depth)); 1410 sdev->queue_depth, fcport->vha->host_no,
1411 sdev->channel, sdev->id, sdev->lun);
1347} 1412}
1348 1413
1349static int 1414static int
@@ -1789,6 +1854,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1789 ha->flags.port0 = 1; 1854 ha->flags.port0 = 1;
1790 else 1855 else
1791 ha->flags.port0 = 0; 1856 ha->flags.port0 = 0;
1857 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
1858 "device_type=0x%x port=%d fw_srisc_address=%p.\n",
1859 ha->device_type, ha->flags.port0, ha->fw_srisc_address);
1792} 1860}
1793 1861
1794static int 1862static int
@@ -1803,10 +1871,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1803 1871
1804 if (pci_request_selected_regions(ha->pdev, ha->bars, 1872 if (pci_request_selected_regions(ha->pdev, ha->bars,
1805 QLA2XXX_DRIVER_NAME)) { 1873 QLA2XXX_DRIVER_NAME)) {
1806 qla_printk(KERN_WARNING, ha, 1874 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
1807 "Failed to reserve PIO/MMIO regions (%s)\n", 1875 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
1808 pci_name(ha->pdev)); 1876 pci_name(ha->pdev));
1809
1810 goto iospace_error_exit; 1877 goto iospace_error_exit;
1811 } 1878 }
1812 if (!(ha->bars & 1)) 1879 if (!(ha->bars & 1))
@@ -1816,39 +1883,42 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1816 pio = pci_resource_start(ha->pdev, 0); 1883 pio = pci_resource_start(ha->pdev, 0);
1817 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1884 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1818 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1885 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1819 qla_printk(KERN_WARNING, ha, 1886 ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
1820 "Invalid PCI I/O region size (%s)...\n", 1887 "Invalid pci I/O region size (%s).\n",
1821 pci_name(ha->pdev)); 1888 pci_name(ha->pdev));
1822 pio = 0; 1889 pio = 0;
1823 } 1890 }
1824 } else { 1891 } else {
1825 qla_printk(KERN_WARNING, ha, 1892 ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
1826 "region #0 not a PIO resource (%s)...\n", 1893 "Region #0 not a PIO resource (%s).\n",
1827 pci_name(ha->pdev)); 1894 pci_name(ha->pdev));
1828 pio = 0; 1895 pio = 0;
1829 } 1896 }
1830 ha->pio_address = pio; 1897 ha->pio_address = pio;
1898 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
1899 "PIO address=%p.\n",
1900 ha->pio_address);
1831 1901
1832skip_pio: 1902skip_pio:
1833 /* Use MMIO operations for all accesses. */ 1903 /* Use MMIO operations for all accesses. */
1834 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1904 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1835 qla_printk(KERN_ERR, ha, 1905 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
1836 "region #1 not an MMIO resource (%s), aborting\n", 1906 "Region #1 not an MMIO resource (%s), aborting.\n",
1837 pci_name(ha->pdev)); 1907 pci_name(ha->pdev));
1838 goto iospace_error_exit; 1908 goto iospace_error_exit;
1839 } 1909 }
1840 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1910 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1841 qla_printk(KERN_ERR, ha, 1911 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
1842 "Invalid PCI mem region size (%s), aborting\n", 1912 "Invalid PCI mem region size (%s), aborting.\n",
1843 pci_name(ha->pdev)); 1913 pci_name(ha->pdev));
1844 goto iospace_error_exit; 1914 goto iospace_error_exit;
1845 } 1915 }
1846 1916
1847 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1917 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1848 if (!ha->iobase) { 1918 if (!ha->iobase) {
1849 qla_printk(KERN_ERR, ha, 1919 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
1850 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); 1920 "Cannot remap MMIO (%s), aborting.\n",
1851 1921 pci_name(ha->pdev));
1852 goto iospace_error_exit; 1922 goto iospace_error_exit;
1853 } 1923 }
1854 1924
@@ -1862,6 +1932,8 @@ skip_pio:
1862 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1932 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1863 pci_resource_len(ha->pdev, 3)); 1933 pci_resource_len(ha->pdev, 3));
1864 if (ha->mqiobase) { 1934 if (ha->mqiobase) {
1935 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
1936 "MQIO Base=%p.\n", ha->mqiobase);
1865 /* Read MSIX vector size of the board */ 1937 /* Read MSIX vector size of the board */
1866 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 1938 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1867 ha->msix_count = msix; 1939 ha->msix_count = msix;
@@ -1874,17 +1946,24 @@ skip_pio:
1874 ha->max_req_queues = 2; 1946 ha->max_req_queues = 2;
1875 } else if (ql2xmaxqueues > 1) { 1947 } else if (ql2xmaxqueues > 1) {
1876 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 1948 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1877 QLA_MQ_SIZE : ql2xmaxqueues; 1949 QLA_MQ_SIZE : ql2xmaxqueues;
1878 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no" 1950 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
1879 " of request queues:%d\n", ha->max_req_queues)); 1951 "QoS mode set, max no of request queues:%d.\n",
1952 ha->max_req_queues);
1953 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
1954 "QoS mode set, max no of request queues:%d.\n",
1955 ha->max_req_queues);
1880 } 1956 }
1881 qla_printk(KERN_INFO, ha, 1957 ql_log_pci(ql_log_info, ha->pdev, 0x001a,
1882 "MSI-X vector count: %d\n", msix); 1958 "MSI-X vector count: %d.\n", msix);
1883 } else 1959 } else
1884 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n"); 1960 ql_log_pci(ql_log_info, ha->pdev, 0x001b,
1961 "BAR 3 not enabled.\n");
1885 1962
1886mqiobase_exit: 1963mqiobase_exit:
1887 ha->msix_count = ha->max_rsp_queues + 1; 1964 ha->msix_count = ha->max_rsp_queues + 1;
1965 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
1966 "MSIX Count:%d.\n", ha->msix_count);
1888 return (0); 1967 return (0);
1889 1968
1890iospace_error_exit: 1969iospace_error_exit:
@@ -1948,7 +2027,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1948 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) { 2027 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
1949 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2028 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1950 mem_only = 1; 2029 mem_only = 1;
2030 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
2031 "Mem only adapter.\n");
1951 } 2032 }
2033 ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
2034 "Bars=%d.\n", bars);
1952 2035
1953 if (mem_only) { 2036 if (mem_only) {
1954 if (pci_enable_device_mem(pdev)) 2037 if (pci_enable_device_mem(pdev))
@@ -1963,9 +2046,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1963 2046
1964 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2047 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1965 if (!ha) { 2048 if (!ha) {
1966 DEBUG(printk("Unable to allocate memory for ha\n")); 2049 ql_log_pci(ql_log_fatal, pdev, 0x0009,
2050 "Unable to allocate memory for ha.\n");
1967 goto probe_out; 2051 goto probe_out;
1968 } 2052 }
2053 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2054 "Memory allocated for ha=%p.\n", ha);
1969 ha->pdev = pdev; 2055 ha->pdev = pdev;
1970 2056
1971 /* Clear our data area */ 2057 /* Clear our data area */
@@ -1987,10 +2073,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1987 if (ret) 2073 if (ret)
1988 goto probe_hw_failed; 2074 goto probe_hw_failed;
1989 2075
1990 qla_printk(KERN_INFO, ha, 2076 ql_log_pci(ql_log_info, pdev, 0x001d,
1991 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 2077 "Found an ISP%04X irq %d iobase 0x%p.\n",
1992 ha->iobase); 2078 pdev->device, pdev->irq, ha->iobase);
1993
1994 ha->prev_topology = 0; 2079 ha->prev_topology = 0;
1995 ha->init_cb_size = sizeof(init_cb_t); 2080 ha->init_cb_size = sizeof(init_cb_t);
1996 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2081 ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2091,7 +2176,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2091 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2176 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2092 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2177 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2093 } 2178 }
2094 2179 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
2180 "mbx_count=%d, req_length=%d, "
2181 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
2182 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d.\n",
2183 ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
2184 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
2185 ha->nvram_npiv_size);
2186 ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
2187 "isp_ops=%p, flash_conf_off=%d, "
2188 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
2189 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
2190 ha->nvram_conf_off, ha->nvram_data_off);
2095 mutex_init(&ha->vport_lock); 2191 mutex_init(&ha->vport_lock);
2096 init_completion(&ha->mbx_cmd_comp); 2192 init_completion(&ha->mbx_cmd_comp);
2097 complete(&ha->mbx_cmd_comp); 2193 complete(&ha->mbx_cmd_comp);
@@ -2101,10 +2197,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2101 set_bit(0, (unsigned long *) ha->vp_idx_map); 2197 set_bit(0, (unsigned long *) ha->vp_idx_map);
2102 2198
2103 qla2x00_config_dma_addressing(ha); 2199 qla2x00_config_dma_addressing(ha);
2200 ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
2201 "64 Bit addressing is %s.\n",
2202 ha->flags.enable_64bit_addressing ? "enable" :
2203 "disable");
2104 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 2204 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2105 if (!ret) { 2205 if (!ret) {
2106 qla_printk(KERN_WARNING, ha, 2206 ql_log_pci(ql_log_fatal, pdev, 0x0031,
2107 "[ERROR] Failed to allocate memory for adapter\n"); 2207 "Failed to allocate memory for adapter, aborting.\n");
2108 2208
2109 goto probe_hw_failed; 2209 goto probe_hw_failed;
2110 } 2210 }
@@ -2116,9 +2216,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2116 2216
2117 base_vha = qla2x00_create_host(sht, ha); 2217 base_vha = qla2x00_create_host(sht, ha);
2118 if (!base_vha) { 2218 if (!base_vha) {
2119 qla_printk(KERN_WARNING, ha,
2120 "[ERROR] Failed to allocate memory for scsi_host\n");
2121
2122 ret = -ENOMEM; 2219 ret = -ENOMEM;
2123 qla2x00_mem_free(ha); 2220 qla2x00_mem_free(ha);
2124 qla2x00_free_req_que(ha, req); 2221 qla2x00_free_req_que(ha, req);
@@ -2145,7 +2242,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2145 if (!IS_QLA82XX(ha)) 2242 if (!IS_QLA82XX(ha))
2146 host->sg_tablesize = QLA_SG_ALL; 2243 host->sg_tablesize = QLA_SG_ALL;
2147 } 2244 }
2148 2245 ql_dbg(ql_dbg_init, base_vha, 0x0032,
2246 "can_queue=%d, req=%p, "
2247 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2248 host->can_queue, base_vha->req,
2249 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2149 host->max_id = max_id; 2250 host->max_id = max_id;
2150 host->this_id = 255; 2251 host->this_id = 255;
2151 host->cmd_per_lun = 3; 2252 host->cmd_per_lun = 3;
@@ -2159,6 +2260,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2159 host->transportt = qla2xxx_transport_template; 2260 host->transportt = qla2xxx_transport_template;
2160 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 2261 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2161 2262
2263 ql_dbg(ql_dbg_init, base_vha, 0x0033,
2264 "max_id=%d this_id=%d "
2265 "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d "
2266 "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
2267 host->this_id, host->cmd_per_lun, host->unique_id,
2268 host->max_cmd_len, host->max_channel, host->max_lun,
2269 host->transportt, sht->vendor_id);
2270
2162 /* Set up the irqs */ 2271 /* Set up the irqs */
2163 ret = qla2x00_request_irqs(ha, rsp); 2272 ret = qla2x00_request_irqs(ha, rsp);
2164 if (ret) 2273 if (ret)
@@ -2169,9 +2278,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2169 /* Alloc arrays of request and response ring ptrs */ 2278 /* Alloc arrays of request and response ring ptrs */
2170que_init: 2279que_init:
2171 if (!qla2x00_alloc_queues(ha)) { 2280 if (!qla2x00_alloc_queues(ha)) {
2172 qla_printk(KERN_WARNING, ha, 2281 ql_log(ql_log_fatal, base_vha, 0x003d,
2173 "[ERROR] Failed to allocate memory for queue" 2282 "Failed to allocate memory for queue pointers.. aborting.\n");
2174 " pointers\n");
2175 goto probe_init_failed; 2283 goto probe_init_failed;
2176 } 2284 }
2177 2285
@@ -2199,20 +2307,33 @@ que_init:
2199 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 2307 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2200 } 2308 }
2201 2309
2202 if (qla2x00_initialize_adapter(base_vha)) { 2310 ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
2203 qla_printk(KERN_WARNING, ha, 2311 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2204 "Failed to initialize adapter\n"); 2312 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2313 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
2314 "req->req_q_in=%p req->req_q_out=%p "
2315 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2316 req->req_q_in, req->req_q_out,
2317 rsp->rsp_q_in, rsp->rsp_q_out);
2318 ql_dbg(ql_dbg_init, base_vha, 0x003e,
2319 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2320 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2321 ql_dbg(ql_dbg_init, base_vha, 0x003f,
2322 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2323 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
2205 2324
2206 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 2325 if (qla2x00_initialize_adapter(base_vha)) {
2207 "Adapter flags %x.\n", 2326 ql_log(ql_log_fatal, base_vha, 0x00d6,
2208 base_vha->host_no, base_vha->device_flags)); 2327 "Failed to initialize adapter - Adapter flags %x.\n",
2328 base_vha->device_flags);
2209 2329
2210 if (IS_QLA82XX(ha)) { 2330 if (IS_QLA82XX(ha)) {
2211 qla82xx_idc_lock(ha); 2331 qla82xx_idc_lock(ha);
2212 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2332 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2213 QLA82XX_DEV_FAILED); 2333 QLA82XX_DEV_FAILED);
2214 qla82xx_idc_unlock(ha); 2334 qla82xx_idc_unlock(ha);
2215 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 2335 ql_log(ql_log_fatal, base_vha, 0x00d7,
2336 "HW State: FAILED.\n");
2216 } 2337 }
2217 2338
2218 ret = -ENODEV; 2339 ret = -ENODEV;
@@ -2221,9 +2342,8 @@ que_init:
2221 2342
2222 if (ha->mqenable) { 2343 if (ha->mqenable) {
2223 if (qla25xx_setup_mode(base_vha)) { 2344 if (qla25xx_setup_mode(base_vha)) {
2224 qla_printk(KERN_WARNING, ha, 2345 ql_log(ql_log_warn, base_vha, 0x00ec,
2225 "Can't create queues, falling back to single" 2346 "Failed to create queues, falling back to single queue mode.\n");
2226 " queue mode\n");
2227 goto que_init; 2347 goto que_init;
2228 } 2348 }
2229 } 2349 }
@@ -2235,13 +2355,15 @@ que_init:
2235 * Startup the kernel thread for this host adapter 2355 * Startup the kernel thread for this host adapter
2236 */ 2356 */
2237 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 2357 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2238 "%s_dpc", base_vha->host_str); 2358 "%s_dpc", base_vha->host_str);
2239 if (IS_ERR(ha->dpc_thread)) { 2359 if (IS_ERR(ha->dpc_thread)) {
2240 qla_printk(KERN_WARNING, ha, 2360 ql_log(ql_log_fatal, base_vha, 0x00ed,
2241 "Unable to start DPC thread!\n"); 2361 "Failed to start DPC thread.\n");
2242 ret = PTR_ERR(ha->dpc_thread); 2362 ret = PTR_ERR(ha->dpc_thread);
2243 goto probe_failed; 2363 goto probe_failed;
2244 } 2364 }
2365 ql_dbg(ql_dbg_init, base_vha, 0x00ee,
2366 "DPC thread started successfully.\n");
2245 2367
2246skip_dpc: 2368skip_dpc:
2247 list_add_tail(&base_vha->list, &ha->vp_list); 2369 list_add_tail(&base_vha->list, &ha->vp_list);
@@ -2249,16 +2371,18 @@ skip_dpc:
2249 2371
2250 /* Initialized the timer */ 2372 /* Initialized the timer */
2251 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL); 2373 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
2252 2374 ql_dbg(ql_dbg_init, base_vha, 0x00ef,
2253 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2375 "Started qla2x00_timer with "
2254 base_vha->host_no, ha)); 2376 "interval=%d.\n", WATCH_INTERVAL);
2377 ql_dbg(ql_dbg_init, base_vha, 0x00f0,
2378 "Detected hba at address=%p.\n",
2379 ha);
2255 2380
2256 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 2381 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
2257 if (ha->fw_attributes & BIT_4) { 2382 if (ha->fw_attributes & BIT_4) {
2258 base_vha->flags.difdix_supported = 1; 2383 base_vha->flags.difdix_supported = 1;
2259 DEBUG18(qla_printk(KERN_INFO, ha, 2384 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2260 "Registering for DIF/DIX type 1 and 3" 2385 "Registering for DIF/DIX type 1 and 3 protection.\n");
2261 " protection.\n"));
2262 scsi_host_set_prot(host, 2386 scsi_host_set_prot(host,
2263 SHOST_DIF_TYPE1_PROTECTION 2387 SHOST_DIF_TYPE1_PROTECTION
2264 | SHOST_DIF_TYPE2_PROTECTION 2388 | SHOST_DIF_TYPE2_PROTECTION
@@ -2280,6 +2404,9 @@ skip_dpc:
2280 base_vha->flags.init_done = 1; 2404 base_vha->flags.init_done = 1;
2281 base_vha->flags.online = 1; 2405 base_vha->flags.online = 1;
2282 2406
2407 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
2408 "Init done and hba is online.\n");
2409
2283 scsi_scan_host(host); 2410 scsi_scan_host(host);
2284 2411
2285 qla2x00_alloc_sysfs_attr(base_vha); 2412 qla2x00_alloc_sysfs_attr(base_vha);
@@ -2288,14 +2415,17 @@ skip_dpc:
2288 2415
2289 qla2x00_dfs_setup(base_vha); 2416 qla2x00_dfs_setup(base_vha);
2290 2417
2291 qla_printk(KERN_INFO, ha, "\n" 2418 ql_log(ql_log_info, base_vha, 0x00fa,
2292 " QLogic Fibre Channel HBA Driver: %s\n" 2419 "QLogic Fibre Channel HBA Driver: %s.\n",
2293 " QLogic %s - %s\n" 2420 qla2x00_version_str);
2294 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 2421 ql_log(ql_log_info, base_vha, 0x00fb,
2295 qla2x00_version_str, ha->model_number, 2422 "QLogic %s - %s.\n",
2296 ha->model_desc ? ha->model_desc : "", pdev->device, 2423 ha->model_number, ha->model_desc ? ha->model_desc : "");
2297 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev), 2424 ql_log(ql_log_info, base_vha, 0x00fc,
2298 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no, 2425 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
2426 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
2427 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
2428 base_vha->host_no,
2299 ha->isp_ops->fw_version_str(base_vha, fw_str)); 2429 ha->isp_ops->fw_version_str(base_vha, fw_str));
2300 2430
2301 return 0; 2431 return 0;
@@ -2593,20 +2723,15 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2593 fcport->login_retry = vha->hw->login_retry_count; 2723 fcport->login_retry = vha->hw->login_retry_count;
2594 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2724 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2595 2725
2596 DEBUG(printk("scsi(%ld): Port login retry: " 2726 ql_dbg(ql_dbg_disc, vha, 0x2067,
2727 "Port login retry "
2597 "%02x%02x%02x%02x%02x%02x%02x%02x, " 2728 "%02x%02x%02x%02x%02x%02x%02x%02x, "
2598 "id = 0x%04x retry cnt=%d\n", 2729 "id = 0x%04x retry cnt=%d.\n",
2599 vha->host_no, 2730 fcport->port_name[0], fcport->port_name[1],
2600 fcport->port_name[0], 2731 fcport->port_name[2], fcport->port_name[3],
2601 fcport->port_name[1], 2732 fcport->port_name[4], fcport->port_name[5],
2602 fcport->port_name[2], 2733 fcport->port_name[6], fcport->port_name[7],
2603 fcport->port_name[3], 2734 fcport->loop_id, fcport->login_retry);
2604 fcport->port_name[4],
2605 fcport->port_name[5],
2606 fcport->port_name[6],
2607 fcport->port_name[7],
2608 fcport->loop_id,
2609 fcport->login_retry));
2610 } 2735 }
2611} 2736}
2612 2737
@@ -2689,6 +2814,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2689 ctx_cachep); 2814 ctx_cachep);
2690 if (!ha->ctx_mempool) 2815 if (!ha->ctx_mempool)
2691 goto fail_free_srb_mempool; 2816 goto fail_free_srb_mempool;
2817 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
2818 "ctx_cachep=%p ctx_mempool=%p.\n",
2819 ctx_cachep, ha->ctx_mempool);
2692 } 2820 }
2693 2821
2694 /* Get memory for cached NVRAM */ 2822 /* Get memory for cached NVRAM */
@@ -2703,22 +2831,29 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2703 if (!ha->s_dma_pool) 2831 if (!ha->s_dma_pool)
2704 goto fail_free_nvram; 2832 goto fail_free_nvram;
2705 2833
2834 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
2835 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
2836 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
2837
2706 if (IS_QLA82XX(ha) || ql2xenabledif) { 2838 if (IS_QLA82XX(ha) || ql2xenabledif) {
2707 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2839 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2708 DSD_LIST_DMA_POOL_SIZE, 8, 0); 2840 DSD_LIST_DMA_POOL_SIZE, 8, 0);
2709 if (!ha->dl_dma_pool) { 2841 if (!ha->dl_dma_pool) {
2710 qla_printk(KERN_WARNING, ha, 2842 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
2711 "Memory Allocation failed - dl_dma_pool\n"); 2843 "Failed to allocate memory for dl_dma_pool.\n");
2712 goto fail_s_dma_pool; 2844 goto fail_s_dma_pool;
2713 } 2845 }
2714 2846
2715 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2847 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2716 FCP_CMND_DMA_POOL_SIZE, 8, 0); 2848 FCP_CMND_DMA_POOL_SIZE, 8, 0);
2717 if (!ha->fcp_cmnd_dma_pool) { 2849 if (!ha->fcp_cmnd_dma_pool) {
2718 qla_printk(KERN_WARNING, ha, 2850 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
2719 "Memory Allocation failed - fcp_cmnd_dma_pool\n"); 2851 "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
2720 goto fail_dl_dma_pool; 2852 goto fail_dl_dma_pool;
2721 } 2853 }
2854 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
2855 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
2856 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
2722 } 2857 }
2723 2858
2724 /* Allocate memory for SNS commands */ 2859 /* Allocate memory for SNS commands */
@@ -2728,6 +2863,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2728 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2863 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2729 if (!ha->sns_cmd) 2864 if (!ha->sns_cmd)
2730 goto fail_dma_pool; 2865 goto fail_dma_pool;
2866 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
2867 "sns_cmd=%p.\n", ha->sns_cmd);
2731 } else { 2868 } else {
2732 /* Get consistent memory allocated for MS IOCB */ 2869 /* Get consistent memory allocated for MS IOCB */
2733 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2870 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -2739,12 +2876,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2739 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2876 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2740 if (!ha->ct_sns) 2877 if (!ha->ct_sns)
2741 goto fail_free_ms_iocb; 2878 goto fail_free_ms_iocb;
2879 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
2880 "ms_iocb=%p ct_sns=%p.\n",
2881 ha->ms_iocb, ha->ct_sns);
2742 } 2882 }
2743 2883
2744 /* Allocate memory for request ring */ 2884 /* Allocate memory for request ring */
2745 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 2885 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2746 if (!*req) { 2886 if (!*req) {
2747 DEBUG(printk("Unable to allocate memory for req\n")); 2887 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
2888 "Failed to allocate memory for req.\n");
2748 goto fail_req; 2889 goto fail_req;
2749 } 2890 }
2750 (*req)->length = req_len; 2891 (*req)->length = req_len;
@@ -2752,14 +2893,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2752 ((*req)->length + 1) * sizeof(request_t), 2893 ((*req)->length + 1) * sizeof(request_t),
2753 &(*req)->dma, GFP_KERNEL); 2894 &(*req)->dma, GFP_KERNEL);
2754 if (!(*req)->ring) { 2895 if (!(*req)->ring) {
2755 DEBUG(printk("Unable to allocate memory for req_ring\n")); 2896 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
2897 "Failed to allocate memory for req_ring.\n");
2756 goto fail_req_ring; 2898 goto fail_req_ring;
2757 } 2899 }
2758 /* Allocate memory for response ring */ 2900 /* Allocate memory for response ring */
2759 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 2901 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2760 if (!*rsp) { 2902 if (!*rsp) {
2761 qla_printk(KERN_WARNING, ha, 2903 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
2762 "Unable to allocate memory for rsp\n"); 2904 "Failed to allocate memory for rsp.\n");
2763 goto fail_rsp; 2905 goto fail_rsp;
2764 } 2906 }
2765 (*rsp)->hw = ha; 2907 (*rsp)->hw = ha;
@@ -2768,19 +2910,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2768 ((*rsp)->length + 1) * sizeof(response_t), 2910 ((*rsp)->length + 1) * sizeof(response_t),
2769 &(*rsp)->dma, GFP_KERNEL); 2911 &(*rsp)->dma, GFP_KERNEL);
2770 if (!(*rsp)->ring) { 2912 if (!(*rsp)->ring) {
2771 qla_printk(KERN_WARNING, ha, 2913 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
2772 "Unable to allocate memory for rsp_ring\n"); 2914 "Failed to allocate memory for rsp_ring.\n");
2773 goto fail_rsp_ring; 2915 goto fail_rsp_ring;
2774 } 2916 }
2775 (*req)->rsp = *rsp; 2917 (*req)->rsp = *rsp;
2776 (*rsp)->req = *req; 2918 (*rsp)->req = *req;
2919 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
2920 "req=%p req->length=%d req->ring=%p rsp=%p "
2921 "rsp->length=%d rsp->ring=%p.\n",
2922 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
2923 (*rsp)->ring);
2777 /* Allocate memory for NVRAM data for vports */ 2924 /* Allocate memory for NVRAM data for vports */
2778 if (ha->nvram_npiv_size) { 2925 if (ha->nvram_npiv_size) {
2779 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) * 2926 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2780 ha->nvram_npiv_size, GFP_KERNEL); 2927 ha->nvram_npiv_size, GFP_KERNEL);
2781 if (!ha->npiv_info) { 2928 if (!ha->npiv_info) {
2782 qla_printk(KERN_WARNING, ha, 2929 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
2783 "Unable to allocate memory for npiv info\n"); 2930 "Failed to allocate memory for npiv_info.\n");
2784 goto fail_npiv_info; 2931 goto fail_npiv_info;
2785 } 2932 }
2786 } else 2933 } else
@@ -2792,6 +2939,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2792 &ha->ex_init_cb_dma); 2939 &ha->ex_init_cb_dma);
2793 if (!ha->ex_init_cb) 2940 if (!ha->ex_init_cb)
2794 goto fail_ex_init_cb; 2941 goto fail_ex_init_cb;
2942 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
2943 "ex_init_cb=%p.\n", ha->ex_init_cb);
2795 } 2944 }
2796 2945
2797 INIT_LIST_HEAD(&ha->gbl_dsd_list); 2946 INIT_LIST_HEAD(&ha->gbl_dsd_list);
@@ -2802,6 +2951,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2802 &ha->async_pd_dma); 2951 &ha->async_pd_dma);
2803 if (!ha->async_pd) 2952 if (!ha->async_pd)
2804 goto fail_async_pd; 2953 goto fail_async_pd;
2954 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
2955 "async_pd=%p.\n", ha->async_pd);
2805 } 2956 }
2806 2957
2807 INIT_LIST_HEAD(&ha->vp_list); 2958 INIT_LIST_HEAD(&ha->vp_list);
@@ -2867,7 +3018,8 @@ fail_free_init_cb:
2867 ha->init_cb = NULL; 3018 ha->init_cb = NULL;
2868 ha->init_cb_dma = 0; 3019 ha->init_cb_dma = 0;
2869fail: 3020fail:
2870 DEBUG(printk("%s: Memory allocation failure\n", __func__)); 3021 ql_log(ql_log_fatal, NULL, 0x0030,
3022 "Memory allocation failure.\n");
2871 return -ENOMEM; 3023 return -ENOMEM;
2872} 3024}
2873 3025
@@ -3016,8 +3168,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3016 3168
3017 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 3169 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
3018 if (host == NULL) { 3170 if (host == NULL) {
3019 printk(KERN_WARNING 3171 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
3020 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 3172 "Failed to allocate host from the scsi layer, aborting.\n");
3021 goto fail; 3173 goto fail;
3022 } 3174 }
3023 3175
@@ -3036,6 +3188,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3036 spin_lock_init(&vha->work_lock); 3188 spin_lock_init(&vha->work_lock);
3037 3189
3038 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3190 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3191 ql_dbg(ql_dbg_init, vha, 0x0041,
3192 "Allocated the host=%p hw=%p vha=%p dev_name=%s",
3193 vha->host, vha->hw, vha,
3194 dev_name(&(ha->pdev->dev)));
3195
3039 return vha; 3196 return vha;
3040 3197
3041fail: 3198fail:
@@ -3277,18 +3434,18 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3277 if (status == QLA_SUCCESS) { 3434 if (status == QLA_SUCCESS) {
3278 fcport->old_loop_id = fcport->loop_id; 3435 fcport->old_loop_id = fcport->loop_id;
3279 3436
3280 DEBUG(printk("scsi(%ld): port login OK: logged " 3437 ql_dbg(ql_dbg_disc, vha, 0x2003,
3281 "in ID 0x%x\n", vha->host_no, fcport->loop_id)); 3438 "Port login OK: logged in ID 0x%x.\n",
3439 fcport->loop_id);
3282 3440
3283 qla2x00_update_fcport(vha, fcport); 3441 qla2x00_update_fcport(vha, fcport);
3284 3442
3285 } else if (status == 1) { 3443 } else if (status == 1) {
3286 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3444 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3287 /* retry the login again */ 3445 /* retry the login again */
3288 DEBUG(printk("scsi(%ld): Retrying" 3446 ql_dbg(ql_dbg_disc, vha, 0x2007,
3289 " %d login again loop_id 0x%x\n", 3447 "Retrying %d login again loop_id 0x%x.\n",
3290 vha->host_no, fcport->login_retry, 3448 fcport->login_retry, fcport->loop_id);
3291 fcport->loop_id));
3292 } else { 3449 } else {
3293 fcport->login_retry = 0; 3450 fcport->login_retry = 0;
3294 } 3451 }
@@ -3328,26 +3485,27 @@ qla2x00_do_dpc(void *data)
3328 3485
3329 set_current_state(TASK_INTERRUPTIBLE); 3486 set_current_state(TASK_INTERRUPTIBLE);
3330 while (!kthread_should_stop()) { 3487 while (!kthread_should_stop()) {
3331 DEBUG3(printk("qla2x00: DPC handler sleeping\n")); 3488 ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
3489 "DPC handler sleeping.\n");
3332 3490
3333 schedule(); 3491 schedule();
3334 __set_current_state(TASK_RUNNING); 3492 __set_current_state(TASK_RUNNING);
3335 3493
3336 DEBUG3(printk("qla2x00: DPC handler waking up\n")); 3494 ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
3495 "DPC handler waking up.\n");
3496 ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
3497 "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
3337 3498
3338 /* Initialization not yet finished. Don't do anything yet. */ 3499 /* Initialization not yet finished. Don't do anything yet. */
3339 if (!base_vha->flags.init_done) 3500 if (!base_vha->flags.init_done)
3340 continue; 3501 continue;
3341 3502
3342 if (ha->flags.eeh_busy) { 3503 if (ha->flags.eeh_busy) {
3343 DEBUG17(qla_printk(KERN_WARNING, ha, 3504 ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
3344 "qla2x00_do_dpc: dpc_flags: %lx\n", 3505 "eeh_busy=%d.\n", ha->flags.eeh_busy);
3345 base_vha->dpc_flags));
3346 continue; 3506 continue;
3347 } 3507 }
3348 3508
3349 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
3350
3351 ha->dpc_active = 1; 3509 ha->dpc_active = 1;
3352 3510
3353 if (ha->flags.mbox_busy) { 3511 if (ha->flags.mbox_busy) {
@@ -3364,8 +3522,8 @@ qla2x00_do_dpc(void *data)
3364 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3522 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3365 QLA82XX_DEV_FAILED); 3523 QLA82XX_DEV_FAILED);
3366 qla82xx_idc_unlock(ha); 3524 qla82xx_idc_unlock(ha);
3367 qla_printk(KERN_INFO, ha, 3525 ql_log(ql_log_info, base_vha, 0x4004,
3368 "HW State: FAILED\n"); 3526 "HW State: FAILED.\n");
3369 qla82xx_device_state_handler(base_vha); 3527 qla82xx_device_state_handler(base_vha);
3370 continue; 3528 continue;
3371 } 3529 }
@@ -3373,10 +3531,8 @@ qla2x00_do_dpc(void *data)
3373 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 3531 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3374 &base_vha->dpc_flags)) { 3532 &base_vha->dpc_flags)) {
3375 3533
3376 DEBUG(printk(KERN_INFO 3534 ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
3377 "scsi(%ld): dpc: sched " 3535 "FCoE context reset scheduled.\n");
3378 "qla82xx_fcoe_ctx_reset ha = %p\n",
3379 base_vha->host_no, ha));
3380 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3536 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3381 &base_vha->dpc_flags))) { 3537 &base_vha->dpc_flags))) {
3382 if (qla82xx_fcoe_ctx_reset(base_vha)) { 3538 if (qla82xx_fcoe_ctx_reset(base_vha)) {
@@ -3390,18 +3546,16 @@ qla2x00_do_dpc(void *data)
3390 &base_vha->dpc_flags); 3546 &base_vha->dpc_flags);
3391 } 3547 }
3392 3548
3393 DEBUG(printk("scsi(%ld): dpc:" 3549 ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
3394 " qla82xx_fcoe_ctx_reset end\n", 3550 "FCoE context reset end.\n");
3395 base_vha->host_no));
3396 } 3551 }
3397 } 3552 }
3398 3553
3399 if (test_and_clear_bit(ISP_ABORT_NEEDED, 3554 if (test_and_clear_bit(ISP_ABORT_NEEDED,
3400 &base_vha->dpc_flags)) { 3555 &base_vha->dpc_flags)) {
3401 3556
3402 DEBUG(printk("scsi(%ld): dpc: sched " 3557 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
3403 "qla2x00_abort_isp ha = %p\n", 3558 "ISP abort scheduled.\n");
3404 base_vha->host_no, ha));
3405 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3559 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3406 &base_vha->dpc_flags))) { 3560 &base_vha->dpc_flags))) {
3407 3561
@@ -3414,8 +3568,8 @@ qla2x00_do_dpc(void *data)
3414 &base_vha->dpc_flags); 3568 &base_vha->dpc_flags);
3415 } 3569 }
3416 3570
3417 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 3571 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
3418 base_vha->host_no)); 3572 "ISP abort end.\n");
3419 } 3573 }
3420 3574
3421 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) { 3575 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
@@ -3424,9 +3578,8 @@ qla2x00_do_dpc(void *data)
3424 } 3578 }
3425 3579
3426 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 3580 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3427 DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched " 3581 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3428 "qla2x00_quiesce_needed ha = %p\n", 3582 "Quiescence mode scheduled.\n");
3429 base_vha->host_no, ha));
3430 qla82xx_device_state_handler(base_vha); 3583 qla82xx_device_state_handler(base_vha);
3431 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); 3584 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
3432 if (!ha->flags.quiesce_owner) { 3585 if (!ha->flags.quiesce_owner) {
@@ -3436,17 +3589,20 @@ qla2x00_do_dpc(void *data)
3436 qla82xx_clear_qsnt_ready(base_vha); 3589 qla82xx_clear_qsnt_ready(base_vha);
3437 qla82xx_idc_unlock(ha); 3590 qla82xx_idc_unlock(ha);
3438 } 3591 }
3592 ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
3593 "Quiescence mode end.\n");
3439 } 3594 }
3440 3595
3441 if (test_and_clear_bit(RESET_MARKER_NEEDED, 3596 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3442 &base_vha->dpc_flags) && 3597 &base_vha->dpc_flags) &&
3443 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 3598 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
3444 3599
3445 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", 3600 ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
3446 base_vha->host_no)); 3601 "Reset marker scheduled.\n");
3447
3448 qla2x00_rst_aen(base_vha); 3602 qla2x00_rst_aen(base_vha);
3449 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 3603 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
3604 ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
3605 "Reset marker end.\n");
3450 } 3606 }
3451 3607
3452 /* Retry each device up to login retry count */ 3608 /* Retry each device up to login retry count */
@@ -3455,19 +3611,18 @@ qla2x00_do_dpc(void *data)
3455 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 3611 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3456 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 3612 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
3457 3613
3458 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 3614 ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
3459 base_vha->host_no)); 3615 "Relogin scheduled.\n");
3460 qla2x00_relogin(base_vha); 3616 qla2x00_relogin(base_vha);
3461 3617 ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
3462 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 3618 "Relogin end.\n");
3463 base_vha->host_no));
3464 } 3619 }
3465 3620
3466 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 3621 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3467 &base_vha->dpc_flags)) { 3622 &base_vha->dpc_flags)) {
3468 3623
3469 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 3624 ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
3470 base_vha->host_no)); 3625 "Loop resync scheduled.\n");
3471 3626
3472 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 3627 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
3473 &base_vha->dpc_flags))) { 3628 &base_vha->dpc_flags))) {
@@ -3478,8 +3633,8 @@ qla2x00_do_dpc(void *data)
3478 &base_vha->dpc_flags); 3633 &base_vha->dpc_flags);
3479 } 3634 }
3480 3635
3481 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 3636 ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
3482 base_vha->host_no)); 3637 "Loop resync end.\n");
3483 } 3638 }
3484 3639
3485 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 3640 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
@@ -3502,7 +3657,8 @@ qla2x00_do_dpc(void *data)
3502 } /* End of while(1) */ 3657 } /* End of while(1) */
3503 __set_current_state(TASK_RUNNING); 3658 __set_current_state(TASK_RUNNING);
3504 3659
3505 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); 3660 ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
3661 "DPC handler exiting.\n");
3506 3662
3507 /* 3663 /*
3508 * Make sure that nobody tries to wake us up again. 3664 * Make sure that nobody tries to wake us up again.
@@ -3609,9 +3765,11 @@ void
3609qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp) 3765qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3610{ 3766{
3611 if (atomic_read(&sp->ref_count) == 0) { 3767 if (atomic_read(&sp->ref_count) == 0) {
3612 DEBUG2(qla_printk(KERN_WARNING, ha, 3768 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
3613 "SP reference-count to ZERO -- sp=%p\n", sp)); 3769 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
3614 DEBUG2(BUG()); 3770 sp, sp->cmd);
3771 if (ql2xextended_error_logging & ql_dbg_io)
3772 BUG();
3615 return; 3773 return;
3616 } 3774 }
3617 if (!atomic_dec_and_test(&sp->ref_count)) 3775 if (!atomic_dec_and_test(&sp->ref_count))
@@ -3639,6 +3797,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3639 struct req_que *req; 3797 struct req_que *req;
3640 3798
3641 if (ha->flags.eeh_busy) { 3799 if (ha->flags.eeh_busy) {
3800 ql_dbg(ql_dbg_timer, vha, 0x6000,
3801 "EEH = %d, restarting timer.\n",
3802 ha->flags.eeh_busy);
3642 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3803 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3643 return; 3804 return;
3644 } 3805 }
@@ -3663,9 +3824,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3663 if (atomic_read(&vha->loop_down_timer) == 3824 if (atomic_read(&vha->loop_down_timer) ==
3664 vha->loop_down_abort_time) { 3825 vha->loop_down_abort_time) {
3665 3826
3666 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 3827 ql_log(ql_log_info, vha, 0x6008,
3667 "queues before time expire\n", 3828 "Loop down - aborting the queues before time expires.\n");
3668 vha->host_no));
3669 3829
3670 if (!IS_QLA2100(ha) && vha->link_down_timeout) 3830 if (!IS_QLA2100(ha) && vha->link_down_timeout)
3671 atomic_set(&vha->loop_state, LOOP_DEAD); 3831 atomic_set(&vha->loop_state, LOOP_DEAD);
@@ -3710,10 +3870,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3710 /* if the loop has been down for 4 minutes, reinit adapter */ 3870 /* if the loop has been down for 4 minutes, reinit adapter */
3711 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 3871 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
3712 if (!(vha->device_flags & DFLG_NO_CABLE)) { 3872 if (!(vha->device_flags & DFLG_NO_CABLE)) {
3713 DEBUG(printk("scsi(%ld): Loop down - " 3873 ql_log(ql_log_warn, vha, 0x6009,
3714 "aborting ISP.\n",
3715 vha->host_no));
3716 qla_printk(KERN_WARNING, ha,
3717 "Loop down - aborting ISP.\n"); 3874 "Loop down - aborting ISP.\n");
3718 3875
3719 if (IS_QLA82XX(ha)) 3876 if (IS_QLA82XX(ha))
@@ -3724,9 +3881,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3724 &vha->dpc_flags); 3881 &vha->dpc_flags);
3725 } 3882 }
3726 } 3883 }
3727 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 3884 ql_dbg(ql_dbg_timer, vha, 0x600a,
3728 vha->host_no, 3885 "Loop down - seconds remaining %d.\n",
3729 atomic_read(&vha->loop_down_timer))); 3886 atomic_read(&vha->loop_down_timer));
3730 } 3887 }
3731 3888
3732 /* Check if beacon LED needs to be blinked for physical host only */ 3889 /* Check if beacon LED needs to be blinked for physical host only */
@@ -3749,8 +3906,27 @@ qla2x00_timer(scsi_qla_host_t *vha)
3749 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 3906 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3750 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 3907 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3751 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 3908 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3752 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) 3909 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
3910 ql_dbg(ql_dbg_timer, vha, 0x600b,
3911 "isp_abort_needed=%d loop_resync_needed=%d "
3912 "fcport_update_needed=%d start_dpc=%d "
3913 "reset_marker_needed=%d",
3914 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
3915 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
3916 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
3917 start_dpc,
3918 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
3919 ql_dbg(ql_dbg_timer, vha, 0x600c,
3920 "beacon_blink_needed=%d isp_unrecoverable=%d "
3921 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
3922 "relogin_needed=%d.\n",
3923 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
3924 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
3925 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
3926 test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
3927 test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
3753 qla2xxx_wake_dpc(vha); 3928 qla2xxx_wake_dpc(vha);
3929 }
3754 3930
3755 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3931 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3756} 3932}
@@ -3819,8 +3995,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
3819 goto out; 3995 goto out;
3820 3996
3821 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 3997 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3822 DEBUG2(printk("scsi(%ld): Failed to load firmware image " 3998 ql_log(ql_log_warn, vha, 0x0063,
3823 "(%s).\n", vha->host_no, blob->name)); 3999 "Failed to load firmware image (%s).\n", blob->name);
3824 blob->fw = NULL; 4000 blob->fw = NULL;
3825 blob = NULL; 4001 blob = NULL;
3826 goto out; 4002 goto out;
@@ -3849,8 +4025,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3849 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 4025 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3850 struct qla_hw_data *ha = vha->hw; 4026 struct qla_hw_data *ha = vha->hw;
3851 4027
3852 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n", 4028 ql_dbg(ql_dbg_aer, vha, 0x9000,
3853 state)); 4029 "PCI error detected, state %x.\n", state);
3854 4030
3855 switch (state) { 4031 switch (state) {
3856 case pci_channel_io_normal: 4032 case pci_channel_io_normal:
@@ -3863,9 +4039,9 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3863 ha->flags.isp82xx_fw_hung = 1; 4039 ha->flags.isp82xx_fw_hung = 1;
3864 if (ha->flags.mbox_busy) { 4040 if (ha->flags.mbox_busy) {
3865 ha->flags.mbox_int = 1; 4041 ha->flags.mbox_int = 1;
3866 DEBUG2(qla_printk(KERN_ERR, ha, 4042 ql_dbg(ql_dbg_aer, vha, 0x9001,
3867 "Due to pci channel io frozen, doing premature " 4043 "Due to pci channel io frozen, doing premature "
3868 "completion of mbx command\n")); 4044 "completion of mbx command.\n");
3869 complete(&ha->mbx_intr_comp); 4045 complete(&ha->mbx_intr_comp);
3870 } 4046 }
3871 } 4047 }
@@ -3913,8 +4089,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3913 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4089 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3914 4090
3915 if (risc_paused) { 4091 if (risc_paused) {
3916 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 4092 ql_log(ql_log_info, base_vha, 0x9003,
3917 "Dumping firmware!\n"); 4093 "RISC paused -- mmio_enabled, Dumping firmware.\n");
3918 ha->isp_ops->fw_dump(base_vha, 0); 4094 ha->isp_ops->fw_dump(base_vha, 0);
3919 4095
3920 return PCI_ERS_RESULT_NEED_RESET; 4096 return PCI_ERS_RESULT_NEED_RESET;
@@ -3930,8 +4106,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3930 int fn; 4106 int fn;
3931 struct pci_dev *other_pdev = NULL; 4107 struct pci_dev *other_pdev = NULL;
3932 4108
3933 DEBUG17(qla_printk(KERN_INFO, ha, 4109 ql_dbg(ql_dbg_aer, base_vha, 0x9006,
3934 "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no)); 4110 "Entered %s.\n", __func__);
3935 4111
3936 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 4112 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3937 4113
@@ -3945,8 +4121,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3945 fn = PCI_FUNC(ha->pdev->devfn); 4121 fn = PCI_FUNC(ha->pdev->devfn);
3946 while (fn > 0) { 4122 while (fn > 0) {
3947 fn--; 4123 fn--;
3948 DEBUG17(qla_printk(KERN_INFO, ha, 4124 ql_dbg(ql_dbg_aer, base_vha, 0x9007,
3949 "Finding pci device at function = 0x%x\n", fn)); 4125 "Finding pci device at function = 0x%x.\n", fn);
3950 other_pdev = 4126 other_pdev =
3951 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 4127 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3952 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 4128 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
@@ -3955,9 +4131,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3955 if (!other_pdev) 4131 if (!other_pdev)
3956 continue; 4132 continue;
3957 if (atomic_read(&other_pdev->enable_cnt)) { 4133 if (atomic_read(&other_pdev->enable_cnt)) {
3958 DEBUG17(qla_printk(KERN_INFO, ha, 4134 ql_dbg(ql_dbg_aer, base_vha, 0x9008,
3959 "Found PCI func available and enabled at 0x%x\n", 4135 "Found PCI func available and enable at 0x%x.\n",
3960 fn)); 4136 fn);
3961 pci_dev_put(other_pdev); 4137 pci_dev_put(other_pdev);
3962 break; 4138 break;
3963 } 4139 }
@@ -3966,8 +4142,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3966 4142
3967 if (!fn) { 4143 if (!fn) {
3968 /* Reset owner */ 4144 /* Reset owner */
3969 DEBUG17(qla_printk(KERN_INFO, ha, 4145 ql_dbg(ql_dbg_aer, base_vha, 0x9009,
3970 "This devfn is reset owner = 0x%x\n", ha->pdev->devfn)); 4146 "This devfn is reset owner = 0x%x.\n",
4147 ha->pdev->devfn);
3971 qla82xx_idc_lock(ha); 4148 qla82xx_idc_lock(ha);
3972 4149
3973 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4150 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
@@ -3977,8 +4154,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3977 QLA82XX_IDC_VERSION); 4154 QLA82XX_IDC_VERSION);
3978 4155
3979 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 4156 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3980 DEBUG17(qla_printk(KERN_INFO, ha, 4157 ql_dbg(ql_dbg_aer, base_vha, 0x900a,
3981 "drv_active = 0x%x\n", drv_active)); 4158 "drv_active = 0x%x.\n", drv_active);
3982 4159
3983 qla82xx_idc_unlock(ha); 4160 qla82xx_idc_unlock(ha);
3984 /* Reset if device is not already reset 4161 /* Reset if device is not already reset
@@ -3991,12 +4168,14 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3991 qla82xx_idc_lock(ha); 4168 qla82xx_idc_lock(ha);
3992 4169
3993 if (rval != QLA_SUCCESS) { 4170 if (rval != QLA_SUCCESS) {
3994 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 4171 ql_log(ql_log_info, base_vha, 0x900b,
4172 "HW State: FAILED.\n");
3995 qla82xx_clear_drv_active(ha); 4173 qla82xx_clear_drv_active(ha);
3996 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4174 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3997 QLA82XX_DEV_FAILED); 4175 QLA82XX_DEV_FAILED);
3998 } else { 4176 } else {
3999 qla_printk(KERN_INFO, ha, "HW State: READY\n"); 4177 ql_log(ql_log_info, base_vha, 0x900c,
4178 "HW State: READY.\n");
4000 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4179 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4001 QLA82XX_DEV_READY); 4180 QLA82XX_DEV_READY);
4002 qla82xx_idc_unlock(ha); 4181 qla82xx_idc_unlock(ha);
@@ -4009,8 +4188,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
4009 } 4188 }
4010 qla82xx_idc_unlock(ha); 4189 qla82xx_idc_unlock(ha);
4011 } else { 4190 } else {
4012 DEBUG17(qla_printk(KERN_INFO, ha, 4191 ql_dbg(ql_dbg_aer, base_vha, 0x900d,
4013 "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn)); 4192 "This devfn is not reset owner = 0x%x.\n",
4193 ha->pdev->devfn);
4014 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 4194 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4015 QLA82XX_DEV_READY)) { 4195 QLA82XX_DEV_READY)) {
4016 ha->flags.isp82xx_fw_hung = 0; 4196 ha->flags.isp82xx_fw_hung = 0;
@@ -4034,7 +4214,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4034 struct rsp_que *rsp; 4214 struct rsp_que *rsp;
4035 int rc, retries = 10; 4215 int rc, retries = 10;
4036 4216
4037 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); 4217 ql_dbg(ql_dbg_aer, base_vha, 0x9004,
4218 "Slot Reset.\n");
4038 4219
4039 /* Workaround: qla2xxx driver which access hardware earlier 4220 /* Workaround: qla2xxx driver which access hardware earlier
4040 * needs error state to be pci_channel_io_online. 4221 * needs error state to be pci_channel_io_online.
@@ -4055,7 +4236,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4055 rc = pci_enable_device(pdev); 4236 rc = pci_enable_device(pdev);
4056 4237
4057 if (rc) { 4238 if (rc) {
4058 qla_printk(KERN_WARNING, ha, 4239 ql_log(ql_log_warn, base_vha, 0x9005,
4059 "Can't re-enable PCI device after reset.\n"); 4240 "Can't re-enable PCI device after reset.\n");
4060 goto exit_slot_reset; 4241 goto exit_slot_reset;
4061 } 4242 }
@@ -4085,8 +4266,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4085 4266
4086 4267
4087exit_slot_reset: 4268exit_slot_reset:
4088 DEBUG17(qla_printk(KERN_WARNING, ha, 4269 ql_dbg(ql_dbg_aer, base_vha, 0x900e,
4089 "slot_reset-return:ret=%x\n", ret)); 4270 "slot_reset return %x.\n", ret);
4090 4271
4091 return ret; 4272 return ret;
4092} 4273}
@@ -4098,13 +4279,13 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
4098 struct qla_hw_data *ha = base_vha->hw; 4279 struct qla_hw_data *ha = base_vha->hw;
4099 int ret; 4280 int ret;
4100 4281
4101 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n")); 4282 ql_dbg(ql_dbg_aer, base_vha, 0x900f,
4283 "pci_resume.\n");
4102 4284
4103 ret = qla2x00_wait_for_hba_online(base_vha); 4285 ret = qla2x00_wait_for_hba_online(base_vha);
4104 if (ret != QLA_SUCCESS) { 4286 if (ret != QLA_SUCCESS) {
4105 qla_printk(KERN_ERR, ha, 4287 ql_log(ql_log_fatal, base_vha, 0x9002,
4106 "the device failed to resume I/O " 4288 "The device failed to resume I/O from slot/link_reset.\n");
4107 "from slot/link_reset");
4108 } 4289 }
4109 4290
4110 pci_cleanup_aer_uncorrect_error_status(pdev); 4291 pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -4168,8 +4349,8 @@ qla2x00_module_init(void)
4168 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 4349 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
4169 SLAB_HWCACHE_ALIGN, NULL); 4350 SLAB_HWCACHE_ALIGN, NULL);
4170 if (srb_cachep == NULL) { 4351 if (srb_cachep == NULL) {
4171 printk(KERN_ERR 4352 ql_log(ql_log_fatal, NULL, 0x0001,
4172 "qla2xxx: Unable to allocate SRB cache...Failing load!\n"); 4353 "Unable to allocate SRB cache...Failing load!.\n");
4173 return -ENOMEM; 4354 return -ENOMEM;
4174 } 4355 }
4175 4356
@@ -4182,13 +4363,15 @@ qla2x00_module_init(void)
4182 fc_attach_transport(&qla2xxx_transport_functions); 4363 fc_attach_transport(&qla2xxx_transport_functions);
4183 if (!qla2xxx_transport_template) { 4364 if (!qla2xxx_transport_template) {
4184 kmem_cache_destroy(srb_cachep); 4365 kmem_cache_destroy(srb_cachep);
4366 ql_log(ql_log_fatal, NULL, 0x0002,
4367 "fc_attach_transport failed...Failing load!.\n");
4185 return -ENODEV; 4368 return -ENODEV;
4186 } 4369 }
4187 4370
4188 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 4371 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
4189 if (apidev_major < 0) { 4372 if (apidev_major < 0) {
4190 printk(KERN_WARNING "qla2xxx: Unable to register char device " 4373 ql_log(ql_log_fatal, NULL, 0x0003,
4191 "%s\n", QLA2XXX_APIDEV); 4374 "Unable to register char device %s.\n", QLA2XXX_APIDEV);
4192 } 4375 }
4193 4376
4194 qla2xxx_transport_vport_template = 4377 qla2xxx_transport_vport_template =
@@ -4196,16 +4379,21 @@ qla2x00_module_init(void)
4196 if (!qla2xxx_transport_vport_template) { 4379 if (!qla2xxx_transport_vport_template) {
4197 kmem_cache_destroy(srb_cachep); 4380 kmem_cache_destroy(srb_cachep);
4198 fc_release_transport(qla2xxx_transport_template); 4381 fc_release_transport(qla2xxx_transport_template);
4382 ql_log(ql_log_fatal, NULL, 0x0004,
4383 "fc_attach_transport vport failed...Failing load!.\n");
4199 return -ENODEV; 4384 return -ENODEV;
4200 } 4385 }
4201 4386 ql_log(ql_log_info, NULL, 0x0005,
4202 printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n", 4387 "QLogic Fibre Channel HBA Driver: %s.\n",
4203 qla2x00_version_str); 4388 qla2x00_version_str);
4204 ret = pci_register_driver(&qla2xxx_pci_driver); 4389 ret = pci_register_driver(&qla2xxx_pci_driver);
4205 if (ret) { 4390 if (ret) {
4206 kmem_cache_destroy(srb_cachep); 4391 kmem_cache_destroy(srb_cachep);
4207 fc_release_transport(qla2xxx_transport_template); 4392 fc_release_transport(qla2xxx_transport_template);
4208 fc_release_transport(qla2xxx_transport_vport_template); 4393 fc_release_transport(qla2xxx_transport_vport_template);
4394 ql_log(ql_log_fatal, NULL, 0x0006,
4395 "pci_register_driver failed...ret=%d Failing load!.\n",
4396 ret);
4209 } 4397 }
4210 return ret; 4398 return ret;
4211} 4399}