aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2011-07-30 14:36:02 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-07-30 14:36:02 -0400
commit6c6e3b828b2a13b923b9465fc4316c5bdc92291f (patch)
treeca027f7d7645c577ed76fcc8358163eb1689d8ae /drivers/scsi
parentc11abbbaa3252875c5740a6880b9a1a6f1e2a870 (diff)
parentd272281c390eb6c3f1e70ed0337c9e619d99cd9c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (71 commits) [SCSI] fcoe: cleanup cpu selection for incoming requests [SCSI] fcoe: add fip retry to avoid missing critical keep alive [SCSI] libfc: fix warn on in lport retry [SCSI] libfc: Remove the reference to FCP packet from scsi_cmnd in case of error [SCSI] libfc: cleanup sending SRR request [SCSI] libfc: two minor changes in comments [SCSI] libfc, fcoe: ignore rx frame with wrong xid info [SCSI] libfc: release exchg cache [SCSI] libfc: use FC_MAX_ERROR_CNT [SCSI] fcoe: remove unused ptype field in fcoe_rcv_info [SCSI] bnx2fc: Update copyright and bump version to 1.0.4 [SCSI] bnx2fc: Tx BDs cache in write tasks [SCSI] bnx2fc: Do not arm CQ when there are no CQEs [SCSI] bnx2fc: hold tgt lock when calling cmd_release [SCSI] bnx2fc: Enable support for sequence level error recovery [SCSI] bnx2fc: HSI changes for tape [SCSI] bnx2fc: Handle REC_TOV error code from firmware [SCSI] bnx2fc: REC/SRR link service request and response handling [SCSI] bnx2fc: Support 'sequence cleanup' task [SCSI] dh_rdac: Associate HBA and storage in rdac_controller to support partitions in storage ...
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/bfa/bfa.h51
-rw-r--r--drivers/scsi/bfa/bfa_core.c60
-rw-r--r--drivers/scsi/bfa/bfa_defs.h171
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h99
-rw-r--r--drivers/scsi/bfa/bfa_fc.h155
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c736
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h45
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c26
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c37
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c74
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c49
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c38
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c25
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c569
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h48
-rw-r--r--drivers/scsi/bfa/bfa_modules.h3
-rw-r--r--drivers/scsi/bfa/bfa_svc.c249
-rw-r--r--drivers/scsi/bfa/bfa_svc.h29
-rw-r--r--drivers/scsi/bfa/bfad.c8
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c1082
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h237
-rw-r--r--drivers/scsi/bfa/bfad_drv.h6
-rw-r--r--drivers/scsi/bfa/bfad_im.c26
-rw-r--r--drivers/scsi/bfa/bfad_im.h22
-rw-r--r--drivers/scsi/bfa/bfi.h20
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h107
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c434
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c732
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c433
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c194
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c51
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c82
-rw-r--r--drivers/scsi/fcoe/fcoe.c69
-rw-r--r--drivers/scsi/hpsa.c4
-rw-r--r--drivers/scsi/hpsa.h2
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/libfc/fc_exch.c9
-rw-r--r--drivers/scsi/libfc/fc_fcp.c9
-rw-r--r--drivers/scsi/libfc/fc_lport.c1
-rw-r--r--drivers/scsi/libsas/sas_expander.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c161
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c1354
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h125
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c105
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c222
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c97
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c399
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c18
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c4
-rw-r--r--drivers/scsi/mvsas/Kconfig9
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c101
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c508
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h99
-rw-r--r--drivers/scsi/mvsas/mv_chips.h17
-rw-r--r--drivers/scsi/mvsas/mv_defs.h11
-rw-r--r--drivers/scsi/mvsas/mv_init.c187
-rw-r--r--drivers/scsi/mvsas/mv_sas.c422
-rw-r--r--drivers/scsi/mvsas/mv_sas.h105
-rw-r--r--drivers/scsi/pmcraid.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c183
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c441
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c396
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h187
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c371
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c856
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c120
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c663
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c1091
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c160
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c556
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c747
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c275
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_spi.c24
90 files changed, 11870 insertions, 4487 deletions
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 3b0af1102bf4..a796de935054 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -27,6 +27,7 @@
27struct bfa_s; 27struct bfa_s;
28 28
29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); 29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
30typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
30 31
31/* 32/*
32 * Interrupt message handlers 33 * Interrupt message handlers
@@ -121,6 +122,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
121#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ 122#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
122 (__hcb_qe)->cbfn = (__cbfn); \ 123 (__hcb_qe)->cbfn = (__cbfn); \
123 (__hcb_qe)->cbarg = (__cbarg); \ 124 (__hcb_qe)->cbarg = (__cbarg); \
125 (__hcb_qe)->pre_rmv = BFA_FALSE; \
124 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ 126 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
125 } while (0) 127 } while (0)
126 128
@@ -135,6 +137,11 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
135 } \ 137 } \
136 } while (0) 138 } while (0)
137 139
140#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do { \
141 (__hcb_qe)->fw_status = (__status); \
142 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
143} while (0)
144
138#define bfa_cb_queue_done(__hcb_qe) do { \ 145#define bfa_cb_queue_done(__hcb_qe) do { \
139 (__hcb_qe)->once = BFA_FALSE; \ 146 (__hcb_qe)->once = BFA_FALSE; \
140 } while (0) 147 } while (0)
@@ -177,7 +184,7 @@ struct bfa_msix_s {
177struct bfa_hwif_s { 184struct bfa_hwif_s {
178 void (*hw_reginit)(struct bfa_s *bfa); 185 void (*hw_reginit)(struct bfa_s *bfa);
179 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); 186 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
180 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); 187 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
181 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); 188 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
182 void (*hw_msix_ctrl_install)(struct bfa_s *bfa); 189 void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
183 void (*hw_msix_queue_install)(struct bfa_s *bfa); 190 void (*hw_msix_queue_install)(struct bfa_s *bfa);
@@ -268,10 +275,8 @@ struct bfa_iocfc_s {
268 ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa)) 275 ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
269#define bfa_msix_uninstall(__bfa) \ 276#define bfa_msix_uninstall(__bfa) \
270 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) 277 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
271#define bfa_isr_rspq_ack(__bfa, __queue) do { \ 278#define bfa_isr_rspq_ack(__bfa, __queue, __ci) \
272 if ((__bfa)->iocfc.hwif.hw_rspq_ack) \ 279 ((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
273 (__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue); \
274} while (0)
275#define bfa_isr_reqq_ack(__bfa, __queue) do { \ 280#define bfa_isr_reqq_ack(__bfa, __queue) do { \
276 if ((__bfa)->iocfc.hwif.hw_reqq_ack) \ 281 if ((__bfa)->iocfc.hwif.hw_reqq_ack) \
277 (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \ 282 (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \
@@ -311,7 +316,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
311void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); 316void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
312 317
313void bfa_hwcb_reginit(struct bfa_s *bfa); 318void bfa_hwcb_reginit(struct bfa_s *bfa);
314void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); 319void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
315void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); 320void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
316void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa); 321void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
317void bfa_hwcb_msix_queue_install(struct bfa_s *bfa); 322void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
@@ -324,7 +329,8 @@ void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
324void bfa_hwct_reginit(struct bfa_s *bfa); 329void bfa_hwct_reginit(struct bfa_s *bfa);
325void bfa_hwct2_reginit(struct bfa_s *bfa); 330void bfa_hwct2_reginit(struct bfa_s *bfa);
326void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); 331void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
327void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); 332void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
333void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
328void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); 334void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
329void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa); 335void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
330void bfa_hwct_msix_queue_install(struct bfa_s *bfa); 336void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
@@ -376,6 +382,22 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
376#define bfa_get_fw_clock_res(__bfa) \ 382#define bfa_get_fw_clock_res(__bfa) \
377 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res) 383 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
378 384
385/*
386 * lun mask macros return NULL when min cfg is enabled and there is
387 * no memory allocated for lunmask.
388 */
389#define bfa_get_lun_mask(__bfa) \
390 ((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
391 (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
392
393#define bfa_get_lun_mask_list(_bfa) \
394 ((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
395 (bfa_get_lun_mask(_bfa)->lun_list)
396
397#define bfa_get_lun_mask_status(_bfa) \
398 (((&(_bfa)->modules.dconf_mod)->min_cfg) \
399 ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
400
379void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); 401void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
380void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); 402void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
381void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); 403void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
@@ -406,7 +428,22 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
406 428
407void bfa_iocfc_enable(struct bfa_s *bfa); 429void bfa_iocfc_enable(struct bfa_s *bfa);
408void bfa_iocfc_disable(struct bfa_s *bfa); 430void bfa_iocfc_disable(struct bfa_s *bfa);
431void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
409#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ 432#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
410 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) 433 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
411 434
435struct bfa_cb_pending_q_s {
436 struct bfa_cb_qe_s hcb_qe;
437 void *data; /* Driver buffer */
438};
439
440/* Common macros to operate on pending stats/attr apis */
441#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \
442 bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
443 (__qe)->hcb_qe.cbfn = (__cbfn); \
444 (__qe)->hcb_qe.cbarg = (__cbarg); \
445 (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
446 (__qe)->data = (__data); \
447} while (0)
448
412#endif /* __BFA_H__ */ 449#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index c38e589105a5..4bd546bcc240 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -33,6 +33,7 @@ static struct bfa_module_s *hal_mods[] = {
33 &hal_mod_uf, 33 &hal_mod_uf,
34 &hal_mod_rport, 34 &hal_mod_rport,
35 &hal_mod_fcp, 35 &hal_mod_fcp,
36 &hal_mod_dconf,
36 NULL 37 NULL
37}; 38};
38 39
@@ -237,8 +238,6 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
237 u32 pi, ci; 238 u32 pi, ci;
238 struct list_head *waitq; 239 struct list_head *waitq;
239 240
240 bfa_isr_rspq_ack(bfa, qid);
241
242 ci = bfa_rspq_ci(bfa, qid); 241 ci = bfa_rspq_ci(bfa, qid);
243 pi = bfa_rspq_pi(bfa, qid); 242 pi = bfa_rspq_pi(bfa, qid);
244 243
@@ -251,11 +250,9 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
251 } 250 }
252 251
253 /* 252 /*
254 * update CI 253 * acknowledge RME completions and update CI
255 */ 254 */
256 bfa_rspq_ci(bfa, qid) = pi; 255 bfa_isr_rspq_ack(bfa, qid, ci);
257 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
258 mmiowb();
259 256
260 /* 257 /*
261 * Resume any pending requests in the corresponding reqq. 258 * Resume any pending requests in the corresponding reqq.
@@ -325,23 +322,19 @@ bfa_intx(struct bfa_s *bfa)
325 int queue; 322 int queue;
326 323
327 intr = readl(bfa->iocfc.bfa_regs.intr_status); 324 intr = readl(bfa->iocfc.bfa_regs.intr_status);
328 if (!intr)
329 return BFA_FALSE;
330 325
331 qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK); 326 qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
332 if (qintr) 327 if (qintr)
333 writel(qintr, bfa->iocfc.bfa_regs.intr_status); 328 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
334 329
335 /* 330 /*
336 * RME completion queue interrupt 331 * Unconditional RME completion queue interrupt
337 */ 332 */
338 qintr = intr & __HFN_INT_RME_MASK; 333 if (bfa->queue_process) {
339 if (qintr && bfa->queue_process) {
340 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) 334 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
341 bfa_isr_rspq(bfa, queue); 335 bfa_isr_rspq(bfa, queue);
342 } 336 }
343 337
344 intr &= ~qintr;
345 if (!intr) 338 if (!intr)
346 return BFA_TRUE; 339 return BFA_TRUE;
347 340
@@ -432,7 +425,8 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
432 __HFN_INT_MBOX_LPU1_CT2); 425 __HFN_INT_MBOX_LPU1_CT2);
433 intr &= __HFN_INT_ERR_MASK_CT2; 426 intr &= __HFN_INT_ERR_MASK_CT2;
434 } else { 427 } else {
435 halt_isr = intr & __HFN_INT_LL_HALT; 428 halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
429 (intr & __HFN_INT_LL_HALT) : 0;
436 pss_isr = intr & __HFN_INT_ERR_PSS; 430 pss_isr = intr & __HFN_INT_ERR_PSS;
437 lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1); 431 lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
438 intr &= __HFN_INT_ERR_MASK; 432 intr &= __HFN_INT_ERR_MASK;
@@ -578,7 +572,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
578 } else { 572 } else {
579 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 573 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
580 iocfc->hwif.hw_reqq_ack = NULL; 574 iocfc->hwif.hw_reqq_ack = NULL;
581 iocfc->hwif.hw_rspq_ack = NULL; 575 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
582 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; 576 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
583 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install; 577 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
584 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install; 578 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
@@ -595,7 +589,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
595 if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) { 589 if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
596 iocfc->hwif.hw_reginit = bfa_hwct2_reginit; 590 iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
597 iocfc->hwif.hw_isr_mode_set = NULL; 591 iocfc->hwif.hw_isr_mode_set = NULL;
598 iocfc->hwif.hw_rspq_ack = NULL; 592 iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
599 } 593 }
600 594
601 iocfc->hwif.hw_reginit(bfa); 595 iocfc->hwif.hw_reginit(bfa);
@@ -685,7 +679,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
685 679
686 bfa->queue_process = BFA_TRUE; 680 bfa->queue_process = BFA_TRUE;
687 for (i = 0; i < BFI_IOC_MAX_CQS; i++) 681 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
688 bfa_isr_rspq_ack(bfa, i); 682 bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
689 683
690 for (i = 0; hal_mods[i]; i++) 684 for (i = 0; hal_mods[i]; i++)
691 hal_mods[i]->start(bfa); 685 hal_mods[i]->start(bfa);
@@ -709,7 +703,7 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
709 struct bfa_s *bfa = bfa_arg; 703 struct bfa_s *bfa = bfa_arg;
710 704
711 if (complete) { 705 if (complete) {
712 if (bfa->iocfc.cfgdone) 706 if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
713 bfa_cb_init(bfa->bfad, BFA_STATUS_OK); 707 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
714 else 708 else
715 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); 709 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
@@ -822,9 +816,11 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
822 */ 816 */
823 bfa_fcport_init(bfa); 817 bfa_fcport_init(bfa);
824 818
825 if (iocfc->action == BFA_IOCFC_ACT_INIT) 819 if (iocfc->action == BFA_IOCFC_ACT_INIT) {
826 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); 820 if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
827 else { 821 bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
822 bfa_iocfc_init_cb, bfa);
823 } else {
828 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) 824 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
829 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, 825 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
830 bfa_iocfc_enable_cb, bfa); 826 bfa_iocfc_enable_cb, bfa);
@@ -1045,6 +1041,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
1045 } 1041 }
1046 1042
1047 bfa_iocfc_send_cfg(bfa); 1043 bfa_iocfc_send_cfg(bfa);
1044 bfa_dconf_modinit(bfa);
1048} 1045}
1049 1046
1050/* 1047/*
@@ -1207,7 +1204,9 @@ bfa_iocfc_stop(struct bfa_s *bfa)
1207 bfa->iocfc.action = BFA_IOCFC_ACT_STOP; 1204 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
1208 1205
1209 bfa->queue_process = BFA_FALSE; 1206 bfa->queue_process = BFA_FALSE;
1210 bfa_ioc_disable(&bfa->ioc); 1207 bfa_dconf_modexit(bfa);
1208 if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
1209 bfa_ioc_disable(&bfa->ioc);
1211} 1210}
1212 1211
1213void 1212void
@@ -1540,10 +1539,17 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1540 struct list_head *qe; 1539 struct list_head *qe;
1541 struct list_head *qen; 1540 struct list_head *qen;
1542 struct bfa_cb_qe_s *hcb_qe; 1541 struct bfa_cb_qe_s *hcb_qe;
1542 bfa_cb_cbfn_status_t cbfn;
1543 1543
1544 list_for_each_safe(qe, qen, comp_q) { 1544 list_for_each_safe(qe, qen, comp_q) {
1545 hcb_qe = (struct bfa_cb_qe_s *) qe; 1545 hcb_qe = (struct bfa_cb_qe_s *) qe;
1546 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); 1546 if (hcb_qe->pre_rmv) {
1547 /* qe is invalid after return, dequeue before cbfn() */
1548 list_del(qe);
1549 cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
1550 cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
1551 } else
1552 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1547 } 1553 }
1548} 1554}
1549 1555
@@ -1556,10 +1562,20 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1556 while (!list_empty(comp_q)) { 1562 while (!list_empty(comp_q)) {
1557 bfa_q_deq(comp_q, &qe); 1563 bfa_q_deq(comp_q, &qe);
1558 hcb_qe = (struct bfa_cb_qe_s *) qe; 1564 hcb_qe = (struct bfa_cb_qe_s *) qe;
1565 WARN_ON(hcb_qe->pre_rmv);
1559 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); 1566 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
1560 } 1567 }
1561} 1568}
1562 1569
1570void
1571bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
1572{
1573 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
1574 if (bfa->iocfc.cfgdone == BFA_TRUE)
1575 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1576 bfa_iocfc_init_cb, bfa);
1577 }
1578}
1563 1579
1564/* 1580/*
1565 * Return the list of PCI vendor/device id lists supported by this 1581 * Return the list of PCI vendor/device id lists supported by this
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index ed8d31b0188b..7b3d235d20b4 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -144,6 +144,7 @@ enum bfa_status {
144 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */ 144 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
145 BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */ 145 BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
146 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */ 146 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
147 BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */
147 BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */ 148 BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
148 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */ 149 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
149 BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */ 150 BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */
@@ -164,6 +165,8 @@ enum bfa_status {
164 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ 165 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
165 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot 166 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
166 * configuration */ 167 * configuration */
168 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
169 BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
167 BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */ 170 BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
168 BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on 171 BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
169 * this adapter */ 172 * this adapter */
@@ -172,11 +175,15 @@ enum bfa_status {
172 BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */ 175 BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
173 BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */ 176 BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
174 BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */ 177 BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
178 BFA_STATUS_ENTRY_EXISTS = 193, /* Entry already exists */
179 BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
180 BFA_STATUS_NO_CHANGE = 195, /* Feature already in that state */
175 BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */ 181 BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */
176 BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */ 182 BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */
177 BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */ 183 BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */
178 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ 184 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
179 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ 185 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
186 BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */
180 BFA_STATUS_MAX_VAL /* Unknown error code */ 187 BFA_STATUS_MAX_VAL /* Unknown error code */
181}; 188};
182#define bfa_status_t enum bfa_status 189#define bfa_status_t enum bfa_status
@@ -359,6 +366,139 @@ struct bfa_ioc_attr_s {
359}; 366};
360 367
361/* 368/*
369 * AEN related definitions
370 */
371enum bfa_aen_category {
372 BFA_AEN_CAT_ADAPTER = 1,
373 BFA_AEN_CAT_PORT = 2,
374 BFA_AEN_CAT_LPORT = 3,
375 BFA_AEN_CAT_RPORT = 4,
376 BFA_AEN_CAT_ITNIM = 5,
377 BFA_AEN_CAT_AUDIT = 8,
378 BFA_AEN_CAT_IOC = 9,
379};
380
381/* BFA adapter level events */
382enum bfa_adapter_aen_event {
383 BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
384 BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
385};
386
387struct bfa_adapter_aen_data_s {
388 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
389 u32 nports; /* Number of NPorts */
390 wwn_t pwwn; /* WWN of one of its physical port */
391};
392
393/* BFA physical port Level events */
394enum bfa_port_aen_event {
395 BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */
396 BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */
397 BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */
398 BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */
399 BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */
400 BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */
401 BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */
402 BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */
403 BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */
404 BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */
405 BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */
406 BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */
407 BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */
408 BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */
409 BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */
410};
411
412enum bfa_port_aen_sfp_pom {
413 BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
414 BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
415 BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */
416 BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED
417};
418
419struct bfa_port_aen_data_s {
420 wwn_t pwwn; /* WWN of the physical port */
421 wwn_t fwwn; /* WWN of the fabric port */
422 u32 phy_port_num; /* For SFP related events */
423 u16 ioc_type;
424 u16 level; /* Only transitions will be informed */
425 mac_t mac; /* MAC address of the ethernet port */
426 u16 rsvd;
427};
428
429/* BFA AEN logical port events */
430enum bfa_lport_aen_event {
431 BFA_LPORT_AEN_NEW = 1, /* LPort created event */
432 BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */
433 BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */
434 BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */
435 BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */
436 BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */
437 BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */
438 BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */
439 BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */
440 BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort with duplicate WWN */
441 BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */
442 BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code */
443};
444
445struct bfa_lport_aen_data_s {
446 u16 vf_id; /* vf_id of this logical port */
447 u16 roles; /* Logical port mode,IM/TM/IP etc */
448 u32 rsvd;
449 wwn_t ppwwn; /* WWN of its physical port */
450 wwn_t lpwwn; /* WWN of this logical port */
451};
452
453/* BFA ITNIM events */
454enum bfa_itnim_aen_event {
455 BFA_ITNIM_AEN_ONLINE = 1, /* Target online */
456 BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */
457 BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */
458};
459
460struct bfa_itnim_aen_data_s {
461 u16 vf_id; /* vf_id of the IT nexus */
462 u16 rsvd[3];
463 wwn_t ppwwn; /* WWN of its physical port */
464 wwn_t lpwwn; /* WWN of logical port */
465 wwn_t rpwwn; /* WWN of remote(target) port */
466};
467
468/* BFA audit events */
469enum bfa_audit_aen_event {
470 BFA_AUDIT_AEN_AUTH_ENABLE = 1,
471 BFA_AUDIT_AEN_AUTH_DISABLE = 2,
472 BFA_AUDIT_AEN_FLASH_ERASE = 3,
473 BFA_AUDIT_AEN_FLASH_UPDATE = 4,
474};
475
476struct bfa_audit_aen_data_s {
477 wwn_t pwwn;
478 int partition_inst;
479 int partition_type;
480};
481
482/* BFA IOC level events */
483enum bfa_ioc_aen_event {
484 BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
485 BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
486 BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
487 BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
488 BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
489 BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
490 BFA_IOC_AEN_INVALID_VENDOR = 7,
491 BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
492 BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
493};
494
495struct bfa_ioc_aen_data_s {
496 wwn_t pwwn;
497 u16 ioc_type;
498 mac_t mac;
499};
500
501/*
362 * ---------------------- mfg definitions ------------ 502 * ---------------------- mfg definitions ------------
363 */ 503 */
364 504
@@ -520,6 +660,20 @@ struct bfa_boot_bootlun_s {
520/* 660/*
521 * BOOT boot configuraton 661 * BOOT boot configuraton
522 */ 662 */
663struct bfa_boot_cfg_s {
664 u8 version;
665 u8 rsvd1;
666 u16 chksum;
667 u8 enable; /* enable/disable SAN boot */
668 u8 speed; /* boot speed settings */
669 u8 topology; /* boot topology setting */
670 u8 bootopt; /* bfa_boot_bootopt_t */
671 u32 nbluns; /* number of boot luns */
672 u32 rsvd2;
673 struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
674 struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
675};
676
523struct bfa_boot_pbc_s { 677struct bfa_boot_pbc_s {
524 u8 enable; /* enable/disable SAN boot */ 678 u8 enable; /* enable/disable SAN boot */
525 u8 speed; /* boot speed settings */ 679 u8 speed; /* boot speed settings */
@@ -529,6 +683,15 @@ struct bfa_boot_pbc_s {
529 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX]; 683 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
530}; 684};
531 685
686struct bfa_ethboot_cfg_s {
687 u8 version;
688 u8 rsvd1;
689 u16 chksum;
690 u8 enable; /* enable/disable Eth/PXE boot */
691 u8 rsvd2;
692 u16 vlan;
693};
694
532/* 695/*
533 * ASIC block configuration related structures 696 * ASIC block configuration related structures
534 */ 697 */
@@ -587,6 +750,14 @@ struct bfa_ablk_cfg_s {
587 */ 750 */
588#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */ 751#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */
589 752
753/* SFP state change notification event */
754#define BFA_SFP_SCN_REMOVED 0
755#define BFA_SFP_SCN_INSERTED 1
756#define BFA_SFP_SCN_POM 2
757#define BFA_SFP_SCN_FAILED 3
758#define BFA_SFP_SCN_UNSUPPORT 4
759#define BFA_SFP_SCN_VALID 5
760
590enum bfa_defs_sfp_media_e { 761enum bfa_defs_sfp_media_e {
591 BFA_SFP_MEDIA_UNKNOWN = 0x00, 762 BFA_SFP_MEDIA_UNKNOWN = 0x00,
592 BFA_SFP_MEDIA_CU = 0x01, 763 BFA_SFP_MEDIA_CU = 0x01,
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 0b97525803fb..863c6ba7d5eb 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -268,6 +268,7 @@ struct bfa_fw_port_snsm_stats_s {
268 u32 error_resets; /* error resets initiated by upsm */ 268 u32 error_resets; /* error resets initiated by upsm */
269 u32 sync_lost; /* Sync loss count */ 269 u32 sync_lost; /* Sync loss count */
270 u32 sig_lost; /* Signal loss count */ 270 u32 sig_lost; /* Signal loss count */
271 u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
271}; 272};
272 273
273struct bfa_fw_port_physm_stats_s { 274struct bfa_fw_port_physm_stats_s {
@@ -468,6 +469,7 @@ struct bfa_fw_stats_s {
468 * QoS states 469 * QoS states
469 */ 470 */
470enum bfa_qos_state { 471enum bfa_qos_state {
472 BFA_QOS_DISABLED = 0, /* QoS is disabled */
471 BFA_QOS_ONLINE = 1, /* QoS is online */ 473 BFA_QOS_ONLINE = 1, /* QoS is online */
472 BFA_QOS_OFFLINE = 2, /* QoS is offline */ 474 BFA_QOS_OFFLINE = 2, /* QoS is offline */
473}; 475};
@@ -670,6 +672,12 @@ struct bfa_itnim_iostats_s {
670 u32 tm_iocdowns; /* TM cleaned-up due to IOC down */ 672 u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
671 u32 tm_cleanups; /* TM cleanup requests */ 673 u32 tm_cleanups; /* TM cleanup requests */
672 u32 tm_cleanup_comps; /* TM cleanup completions */ 674 u32 tm_cleanup_comps; /* TM cleanup completions */
675 u32 lm_lun_across_sg; /* LM lun is across sg data buf */
676 u32 lm_lun_not_sup; /* LM lun not supported */
677 u32 lm_rpl_data_changed; /* LM report-lun data changed */
678 u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
679 u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
680 u32 lm_lun_not_rdy; /* LM lun not ready */
673}; 681};
674 682
675/* Modify char* port_stt[] in bfal_port.c if a new state was added */ 683/* Modify char* port_stt[] in bfal_port.c if a new state was added */
@@ -785,8 +793,51 @@ enum bfa_port_linkstate_rsn {
785 CEE_ISCSI_PRI_PFC_OFF = 42, 793 CEE_ISCSI_PRI_PFC_OFF = 42,
786 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43 794 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
787}; 795};
796
797#define MAX_LUN_MASK_CFG 16
798
799/*
800 * Initially flash content may be fff. On making LUN mask enable and disable
801 * state chnage. when report lun command is being processed it goes from
802 * BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
803 * BFA_LUN_MASK_ACTIVE.
804 */
805enum bfa_ioim_lun_mask_state_s {
806 BFA_IOIM_LUN_MASK_INACTIVE = 0,
807 BFA_IOIM_LUN_MASK_ACTIVE = 1,
808 BFA_IOIM_LUN_MASK_FETCHED = 2,
809};
810
811enum bfa_lunmask_state_s {
812 BFA_LUNMASK_DISABLED = 0x00,
813 BFA_LUNMASK_ENABLED = 0x01,
814 BFA_LUNMASK_MINCFG = 0x02,
815 BFA_LUNMASK_UNINITIALIZED = 0xff,
816};
817
788#pragma pack(1) 818#pragma pack(1)
789/* 819/*
820 * LUN mask configuration
821 */
822struct bfa_lun_mask_s {
823 wwn_t lp_wwn;
824 wwn_t rp_wwn;
825 struct scsi_lun lun;
826 u8 ua;
827 u8 rsvd[3];
828 u16 rp_tag;
829 u8 lp_tag;
830 u8 state;
831};
832
833#define MAX_LUN_MASK_CFG 16
834struct bfa_lunmask_cfg_s {
835 u32 status;
836 u32 rsvd;
837 struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG];
838};
839
840/*
790 * Physical port configuration 841 * Physical port configuration
791 */ 842 */
792struct bfa_port_cfg_s { 843struct bfa_port_cfg_s {
@@ -1228,4 +1279,52 @@ struct bfa_cee_stats_s {
1228 1279
1229#pragma pack() 1280#pragma pack()
1230 1281
1282/*
1283 * AEN related definitions
1284 */
1285#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
1286 | BFA_PCI_VENDOR_ID_BROCADE)
1287
1288/* BFA remote port events */
1289enum bfa_rport_aen_event {
1290 BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */
1291 BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */
1292 BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */
1293 BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */
1294 BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */
1295};
1296
1297struct bfa_rport_aen_data_s {
1298 u16 vf_id; /* vf_id of this logical port */
1299 u16 rsvd[3];
1300 wwn_t ppwwn; /* WWN of its physical port */
1301 wwn_t lpwwn; /* WWN of this logical port */
1302 wwn_t rpwwn; /* WWN of this remote port */
1303 union {
1304 struct bfa_rport_qos_attr_s qos;
1305 } priv;
1306};
1307
1308union bfa_aen_data_u {
1309 struct bfa_adapter_aen_data_s adapter;
1310 struct bfa_port_aen_data_s port;
1311 struct bfa_lport_aen_data_s lport;
1312 struct bfa_rport_aen_data_s rport;
1313 struct bfa_itnim_aen_data_s itnim;
1314 struct bfa_audit_aen_data_s audit;
1315 struct bfa_ioc_aen_data_s ioc;
1316};
1317
1318#define BFA_AEN_MAX_ENTRY 512
1319
1320struct bfa_aen_entry_s {
1321 struct list_head qe;
1322 enum bfa_aen_category aen_category;
1323 u32 aen_type;
1324 union bfa_aen_data_u aen_data;
1325 struct timeval aen_tv;
1326 u32 seq_num;
1327 u32 bfad_num;
1328};
1329
1231#endif /* __BFA_DEFS_SVC_H__ */ 1330#endif /* __BFA_DEFS_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 8d0b88f67a38..50b6a1c86195 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,6 +56,161 @@ struct scsi_cdb_s {
56 56
57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */ 57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
58 58
59#define SCSI_SENSE_CUR_ERR 0x70
60#define SCSI_SENSE_DEF_ERR 0x71
61
62/*
63 * SCSI additional sense codes
64 */
65#define SCSI_ASC_LUN_NOT_READY 0x04
66#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
67#define SCSI_ASC_TOCC 0x3F
68
69/*
70 * SCSI additional sense code qualifiers
71 */
72#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
73#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
74
75/*
76 * Methods of reporting informational exceptions
77 */
78#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
79
80struct scsi_report_luns_data_s {
81 u32 lun_list_length; /* length of LUN list length */
82 u32 reserved;
83 struct scsi_lun lun[1]; /* first LUN in lun list */
84};
85
86struct scsi_inquiry_vendor_s {
87 u8 vendor_id[8];
88};
89
90struct scsi_inquiry_prodid_s {
91 u8 product_id[16];
92};
93
94struct scsi_inquiry_prodrev_s {
95 u8 product_rev[4];
96};
97
98struct scsi_inquiry_data_s {
99#ifdef __BIG_ENDIAN
100 u8 peripheral_qual:3; /* peripheral qualifier */
101 u8 device_type:5; /* peripheral device type */
102 u8 rmb:1; /* removable medium bit */
103 u8 device_type_mod:7; /* device type modifier */
104 u8 version;
105 u8 aenc:1; /* async evt notification capability */
106 u8 trm_iop:1; /* terminate I/O process */
107 u8 norm_aca:1; /* normal ACA supported */
108 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
109 u8 rsp_data_format:4;
110 u8 additional_len;
111 u8 sccs:1;
112 u8 reserved1:7;
113 u8 reserved2:1;
114 u8 enc_serv:1; /* enclosure service component */
115 u8 reserved3:1;
116 u8 multi_port:1; /* multi-port device */
117 u8 m_chngr:1; /* device in medium transport element */
118 u8 ack_req_q:1; /* SIP specific bit */
119 u8 addr32:1; /* SIP specific bit */
120 u8 addr16:1; /* SIP specific bit */
121 u8 rel_adr:1; /* relative address */
122 u8 w_bus32:1;
123 u8 w_bus16:1;
124 u8 synchronous:1;
125 u8 linked_commands:1;
126 u8 trans_dis:1;
127 u8 cmd_queue:1; /* command queueing supported */
128 u8 soft_reset:1; /* soft reset alternative (VS) */
129#else
130 u8 device_type:5; /* peripheral device type */
131 u8 peripheral_qual:3; /* peripheral qualifier */
132 u8 device_type_mod:7; /* device type modifier */
133 u8 rmb:1; /* removable medium bit */
134 u8 version;
135 u8 rsp_data_format:4;
136 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
137 u8 norm_aca:1; /* normal ACA supported */
138 u8 terminate_iop:1;/* terminate I/O process */
139 u8 aenc:1; /* async evt notification capability */
140 u8 additional_len;
141 u8 reserved1:7;
142 u8 sccs:1;
143 u8 addr16:1; /* SIP specific bit */
144 u8 addr32:1; /* SIP specific bit */
145 u8 ack_req_q:1; /* SIP specific bit */
146 u8 m_chngr:1; /* device in medium transport element */
147 u8 multi_port:1; /* multi-port device */
148 u8 reserved3:1; /* TBD - Vendor Specific */
149 u8 enc_serv:1; /* enclosure service component */
150 u8 reserved2:1;
151 u8 soft_seset:1; /* soft reset alternative (VS) */
152 u8 cmd_queue:1; /* command queueing supported */
153 u8 trans_dis:1;
154 u8 linked_commands:1;
155 u8 synchronous:1;
156 u8 w_bus16:1;
157 u8 w_bus32:1;
158 u8 rel_adr:1; /* relative address */
159#endif
160 struct scsi_inquiry_vendor_s vendor_id;
161 struct scsi_inquiry_prodid_s product_id;
162 struct scsi_inquiry_prodrev_s product_rev;
163 u8 vendor_specific[20];
164 u8 reserved4[40];
165};
166
167/*
168 * SCSI sense data format
169 */
170struct scsi_sense_s {
171#ifdef __BIG_ENDIAN
172 u8 valid:1;
173 u8 rsp_code:7;
174#else
175 u8 rsp_code:7;
176 u8 valid:1;
177#endif
178 u8 seg_num;
179#ifdef __BIG_ENDIAN
180 u8 file_mark:1;
181 u8 eom:1; /* end of media */
182 u8 ili:1; /* incorrect length indicator */
183 u8 reserved:1;
184 u8 sense_key:4;
185#else
186 u8 sense_key:4;
187 u8 reserved:1;
188 u8 ili:1; /* incorrect length indicator */
189 u8 eom:1; /* end of media */
190 u8 file_mark:1;
191#endif
192 u8 information[4]; /* device-type or cmd specific info */
193 u8 add_sense_length; /* additional sense length */
194 u8 command_info[4];/* command specific information */
195 u8 asc; /* additional sense code */
196 u8 ascq; /* additional sense code qualifier */
197 u8 fru_code; /* field replaceable unit code */
198#ifdef __BIG_ENDIAN
199 u8 sksv:1; /* sense key specific valid */
200 u8 c_d:1; /* command/data bit */
201 u8 res1:2;
202 u8 bpv:1; /* bit pointer valid */
203 u8 bpointer:3; /* bit pointer */
204#else
205 u8 bpointer:3; /* bit pointer */
206 u8 bpv:1; /* bit pointer valid */
207 u8 res1:2;
208 u8 c_d:1; /* command/data bit */
209 u8 sksv:1; /* sense key specific valid */
210#endif
211 u8 fpointer[2]; /* field pointer */
212};
213
59/* 214/*
60 * Fibre Channel Header Structure (FCHS) definition 215 * Fibre Channel Header Structure (FCHS) definition
61 */ 216 */
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index a4e7951c6063..e07bd4745d8b 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,6 +24,9 @@ BFA_TRC_FILE(HAL, FCPIM);
24 * BFA ITNIM Related definitions 24 * BFA ITNIM Related definitions
25 */ 25 */
26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); 26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
27static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
28static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
29static void bfa_ioim_lm_init(struct bfa_s *bfa);
27 30
28#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ 31#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
29 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))) 32 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
@@ -57,6 +60,14 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
57 } \ 60 } \
58} while (0) 61} while (0)
59 62
63#define bfa_ioim_rp_wwn(__ioim) \
64 (((struct bfa_fcs_rport_s *) \
65 (__ioim)->itnim->rport->rport_drv)->pwwn)
66
67#define bfa_ioim_lp_wwn(__ioim) \
68 ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
69 (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
70
60#define bfa_itnim_sler_cb(__itnim) do { \ 71#define bfa_itnim_sler_cb(__itnim) do { \
61 if ((__itnim)->bfa->fcs) \ 72 if ((__itnim)->bfa->fcs) \
62 bfa_cb_itnim_sler((__itnim)->ditn); \ 73 bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -66,6 +77,18 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
66 } \ 77 } \
67} while (0) 78} while (0)
68 79
80enum bfa_ioim_lm_status {
81 BFA_IOIM_LM_PRESENT = 1,
82 BFA_IOIM_LM_LUN_NOT_SUP = 2,
83 BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
84 BFA_IOIM_LM_LUN_NOT_RDY = 4,
85};
86
87enum bfa_ioim_lm_ua_status {
88 BFA_IOIM_LM_UA_RESET = 0,
89 BFA_IOIM_LM_UA_SET = 1,
90};
91
69/* 92/*
70 * itnim state machine event 93 * itnim state machine event
71 */ 94 */
@@ -122,6 +145,9 @@ enum bfa_ioim_event {
122 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ 145 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
123 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ 146 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
124 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ 147 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
148 BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
149 BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
150 BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
125}; 151};
126 152
127 153
@@ -219,6 +245,9 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
219static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); 245static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
220static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); 246static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
221static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 247static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
248static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
249static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
250static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
222 251
223/* 252/*
224 * forward declaration of BFA IO state machine 253 * forward declaration of BFA IO state machine
@@ -416,6 +445,12 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
416 bfa_fcpim_add_iostats(lstats, rstats, output_reqs); 445 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
417 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); 446 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
418 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); 447 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
448 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
449 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
450 bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
451 bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
452 bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
453 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
419} 454}
420 455
421bfa_status_t 456bfa_status_t
@@ -437,6 +472,59 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
437 return BFA_STATUS_OK; 472 return BFA_STATUS_OK;
438} 473}
439 474
475void
476bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
477{
478 struct bfa_itnim_latency_s *io_lat =
479 &(ioim->itnim->ioprofile.io_latency);
480 u32 val, idx;
481
482 val = (u32)(jiffies - ioim->start_time);
483 idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
484 bfa_itnim_ioprofile_update(ioim->itnim, idx);
485
486 io_lat->count[idx]++;
487 io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
488 io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
489 io_lat->avg[idx] += val;
490}
491
492void
493bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
494{
495 ioim->start_time = jiffies;
496}
497
498bfa_status_t
499bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
500{
501 struct bfa_itnim_s *itnim;
502 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
503 struct list_head *qe, *qen;
504
505 /* accumulate IO stats from itnim */
506 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
507 itnim = (struct bfa_itnim_s *) qe;
508 bfa_itnim_clear_stats(itnim);
509 }
510 fcpim->io_profile = BFA_TRUE;
511 fcpim->io_profile_start_time = time;
512 fcpim->profile_comp = bfa_ioim_profile_comp;
513 fcpim->profile_start = bfa_ioim_profile_start;
514 return BFA_STATUS_OK;
515}
516
517bfa_status_t
518bfa_fcpim_profile_off(struct bfa_s *bfa)
519{
520 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
521 fcpim->io_profile = BFA_FALSE;
522 fcpim->io_profile_start_time = 0;
523 fcpim->profile_comp = NULL;
524 fcpim->profile_start = NULL;
525 return BFA_STATUS_OK;
526}
527
440u16 528u16
441bfa_fcpim_qdepth_get(struct bfa_s *bfa) 529bfa_fcpim_qdepth_get(struct bfa_s *bfa)
442{ 530{
@@ -1401,6 +1489,26 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1401 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); 1489 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1402} 1490}
1403 1491
1492#define bfa_io_lat_clock_res_div HZ
1493#define bfa_io_lat_clock_res_mul 1000
1494bfa_status_t
1495bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1496 struct bfa_itnim_ioprofile_s *ioprofile)
1497{
1498 struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
1499 if (!fcpim->io_profile)
1500 return BFA_STATUS_IOPROFILE_OFF;
1501
1502 itnim->ioprofile.index = BFA_IOBUCKET_MAX;
1503 itnim->ioprofile.io_profile_start_time =
1504 bfa_io_profile_start_time(itnim->bfa);
1505 itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
1506 itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
1507 *ioprofile = itnim->ioprofile;
1508
1509 return BFA_STATUS_OK;
1510}
1511
1404void 1512void
1405bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) 1513bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1406{ 1514{
@@ -1469,7 +1577,28 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1469 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 1577 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1470 WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); 1578 WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
1471 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, 1579 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1472 __bfa_cb_ioim_abort, ioim); 1580 __bfa_cb_ioim_abort, ioim);
1581 break;
1582
1583 case BFA_IOIM_SM_LM_LUN_NOT_SUP:
1584 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1585 bfa_ioim_move_to_comp_q(ioim);
1586 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1587 __bfa_cb_ioim_lm_lun_not_sup, ioim);
1588 break;
1589
1590 case BFA_IOIM_SM_LM_RPL_DC:
1591 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1592 bfa_ioim_move_to_comp_q(ioim);
1593 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1594 __bfa_cb_ioim_lm_rpl_dc, ioim);
1595 break;
1596
1597 case BFA_IOIM_SM_LM_LUN_NOT_RDY:
1598 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1599 bfa_ioim_move_to_comp_q(ioim);
1600 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1601 __bfa_cb_ioim_lm_lun_not_rdy, ioim);
1473 break; 1602 break;
1474 1603
1475 default: 1604 default:
@@ -2009,6 +2138,264 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2009 } 2138 }
2010} 2139}
2011 2140
2141/*
2142 * This is called from bfa_fcpim_start after the bfa_init() with flash read
2143 * is complete by driver. now invalidate the stale content of lun mask
2144 * like unit attention, rp tag and lp tag.
2145 */
2146static void
2147bfa_ioim_lm_init(struct bfa_s *bfa)
2148{
2149 struct bfa_lun_mask_s *lunm_list;
2150 int i;
2151
2152 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2153 return;
2154
2155 lunm_list = bfa_get_lun_mask_list(bfa);
2156 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2157 lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2158 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2159 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2160 }
2161}
2162
2163/*
2164 * Validate LUN for LUN masking
2165 */
2166static enum bfa_ioim_lm_status
2167bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
2168 struct bfa_rport_s *rp, struct scsi_lun lun)
2169{
2170 u8 i;
2171 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2172 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2173 struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
2174
2175 if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
2176 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2177 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2178 return BFA_IOIM_LM_PRESENT;
2179 }
2180
2181 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2182
2183 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2184 continue;
2185
2186 if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
2187 scsilun_to_int((struct scsi_lun *)&lun))
2188 && (rp->rport_tag == lun_list[i].rp_tag)
2189 && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
2190 lun_list[i].lp_tag)) {
2191 bfa_trc(ioim->bfa, lun_list[i].rp_tag);
2192 bfa_trc(ioim->bfa, lun_list[i].lp_tag);
2193 bfa_trc(ioim->bfa, scsilun_to_int(
2194 (struct scsi_lun *)&lun_list[i].lun));
2195
2196 if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
2197 ((cdb->scsi_cdb[0] != INQUIRY) ||
2198 (cdb->scsi_cdb[0] != REPORT_LUNS))) {
2199 lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
2200 return BFA_IOIM_LM_RPL_DATA_CHANGED;
2201 }
2202
2203 if (cdb->scsi_cdb[0] == REPORT_LUNS)
2204 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2205
2206 return BFA_IOIM_LM_PRESENT;
2207 }
2208 }
2209
2210 if ((cdb->scsi_cdb[0] == INQUIRY) &&
2211 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2212 ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
2213 return BFA_IOIM_LM_PRESENT;
2214 }
2215
2216 if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
2217 return BFA_IOIM_LM_LUN_NOT_RDY;
2218
2219 return BFA_IOIM_LM_LUN_NOT_SUP;
2220}
2221
2222static bfa_boolean_t
2223bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
2224{
2225 return BFA_TRUE;
2226}
2227
2228static void
2229bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
2230 int buf_lun_cnt)
2231{
2232 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2233 struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
2234 struct scsi_lun lun;
2235 int i, j;
2236
2237 bfa_trc(ioim->bfa, buf_lun_cnt);
2238 for (j = 0; j < buf_lun_cnt; j++) {
2239 lun = *((struct scsi_lun *)(lun_data + j));
2240 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2241 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2242 continue;
2243 if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
2244 (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
2245 (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
2246 == scsilun_to_int((struct scsi_lun *)&lun))) {
2247 lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
2248 break;
2249 }
2250 } /* next lun in mask DB */
2251 } /* next lun in buf */
2252}
2253
2254static int
2255bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
2256 struct scsi_report_luns_data_s *rl)
2257{
2258 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2259 struct scatterlist *sg = scsi_sglist(cmnd);
2260 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2261 struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
2262 int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
2263 int lun_across_sg_bytes, bytes_from_next_buf;
2264 u64 last_lun, temp_last_lun;
2265
2266 /* fetch luns from the first sg element */
2267 bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
2268 (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
2269
2270 /* fetch luns from multiple sg elements */
2271 scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
2272 if (sgeid == 0) {
2273 prev_sg_len = sg_dma_len(sg);
2274 prev_rl_data = (struct scsi_lun *)
2275 phys_to_virt(sg_dma_address(sg));
2276 continue;
2277 }
2278
2279 /* if the buf is having more data */
2280 lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
2281 if (lun_across_sg_bytes) {
2282 bfa_trc(ioim->bfa, lun_across_sg_bytes);
2283 bfa_stats(ioim->itnim, lm_lun_across_sg);
2284 bytes_from_next_buf = sizeof(struct scsi_lun) -
2285 lun_across_sg_bytes;
2286
2287 /* from next buf take higher bytes */
2288 temp_last_lun = *((u64 *)
2289 phys_to_virt(sg_dma_address(sg)));
2290 last_lun |= temp_last_lun >>
2291 (lun_across_sg_bytes * BITS_PER_BYTE);
2292
2293 /* from prev buf take higher bytes */
2294 temp_last_lun = *((u64 *)(prev_rl_data +
2295 (prev_sg_len - lun_across_sg_bytes)));
2296 temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
2297 last_lun = last_lun | (temp_last_lun <<
2298 (bytes_from_next_buf * BITS_PER_BYTE));
2299
2300 bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
2301 } else
2302 bytes_from_next_buf = 0;
2303
2304 *pgdlen += sg_dma_len(sg);
2305 prev_sg_len = sg_dma_len(sg);
2306 prev_rl_data = (struct scsi_lun *)
2307 phys_to_virt(sg_dma_address(sg));
2308 bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
2309 bytes_from_next_buf,
2310 sg_dma_len(sg) / sizeof(struct scsi_lun));
2311 }
2312
2313 /* update the report luns data - based on fetched luns */
2314 sg = scsi_sglist(cmnd);
2315 base_rl_data = (struct scsi_lun *)rl->lun;
2316 base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
2317 for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
2318 if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
2319 base_rl_data[j] = lun_list[i].lun;
2320 lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
2321 j++;
2322 lun_fetched_cnt++;
2323 }
2324
2325 if (j > base_count) {
2326 j = 0;
2327 sg = sg_next(sg);
2328 base_rl_data = (struct scsi_lun *)
2329 phys_to_virt(sg_dma_address(sg));
2330 base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
2331 }
2332 }
2333
2334 bfa_trc(ioim->bfa, lun_fetched_cnt);
2335 return lun_fetched_cnt;
2336}
2337
2338static bfa_boolean_t
2339bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
2340{
2341 struct scsi_inquiry_data_s *inq;
2342 struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
2343
2344 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2345 inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
2346
2347 bfa_trc(ioim->bfa, inq->device_type);
2348 inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
2349 return 0;
2350}
2351
2352static bfa_boolean_t
2353bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
2354{
2355 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2356 struct scatterlist *sg = scsi_sglist(cmnd);
2357 struct bfi_ioim_rsp_s *m;
2358 struct scsi_report_luns_data_s *rl = NULL;
2359 int lun_count = 0, lun_fetched_cnt = 0;
2360 u32 residue, pgdlen = 0;
2361
2362 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2363 if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
2364 return BFA_TRUE;
2365
2366 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2367 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
2368 return BFA_TRUE;
2369
2370 pgdlen = sg_dma_len(sg);
2371 bfa_trc(ioim->bfa, pgdlen);
2372 rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
2373 lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
2374 lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
2375
2376 if (lun_count == lun_fetched_cnt)
2377 return BFA_TRUE;
2378
2379 bfa_trc(ioim->bfa, lun_count);
2380 bfa_trc(ioim->bfa, lun_fetched_cnt);
2381 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2382
2383 if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
2384 rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
2385 sizeof(struct scsi_lun);
2386 else
2387 bfa_stats(ioim->itnim, lm_small_buf_addresidue);
2388
2389 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2390 bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
2391
2392 residue = be32_to_cpu(m->residue);
2393 residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
2394 bfa_stats(ioim->itnim, lm_wire_residue_changed);
2395 m->residue = be32_to_cpu(residue);
2396 bfa_trc(ioim->bfa, ioim->nsges);
2397 return BFA_FALSE;
2398}
2012 2399
2013static void 2400static void
2014__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) 2401__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
@@ -2068,6 +2455,299 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2068} 2455}
2069 2456
2070static void 2457static void
2458__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
2459{
2460 struct bfa_ioim_s *ioim = cbarg;
2461 int sns_len = 0xD;
2462 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2463 struct scsi_sense_s *snsinfo;
2464
2465 if (!complete) {
2466 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2467 return;
2468 }
2469
2470 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2471 ioim->fcpim->fcp, ioim->iotag);
2472 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2473 snsinfo->add_sense_length = 0xa;
2474 snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
2475 snsinfo->sense_key = ILLEGAL_REQUEST;
2476 bfa_trc(ioim->bfa, residue);
2477 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2478 SCSI_STATUS_CHECK_CONDITION, sns_len,
2479 (u8 *)snsinfo, residue);
2480}
2481
2482static void
2483__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
2484{
2485 struct bfa_ioim_s *ioim = cbarg;
2486 int sns_len = 0xD;
2487 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2488 struct scsi_sense_s *snsinfo;
2489
2490 if (!complete) {
2491 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2492 return;
2493 }
2494
2495 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2496 ioim->iotag);
2497 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2498 snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
2499 snsinfo->asc = SCSI_ASC_TOCC;
2500 snsinfo->add_sense_length = 0x6;
2501 snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
2502 bfa_trc(ioim->bfa, residue);
2503 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2504 SCSI_STATUS_CHECK_CONDITION, sns_len,
2505 (u8 *)snsinfo, residue);
2506}
2507
2508static void
2509__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
2510{
2511 struct bfa_ioim_s *ioim = cbarg;
2512 int sns_len = 0xD;
2513 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2514 struct scsi_sense_s *snsinfo;
2515
2516 if (!complete) {
2517 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2518 return;
2519 }
2520
2521 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2522 ioim->fcpim->fcp, ioim->iotag);
2523 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2524 snsinfo->add_sense_length = 0xa;
2525 snsinfo->sense_key = NOT_READY;
2526 snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
2527 snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
2528 bfa_trc(ioim->bfa, residue);
2529 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2530 SCSI_STATUS_CHECK_CONDITION, sns_len,
2531 (u8 *)snsinfo, residue);
2532}
2533
2534void
2535bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2536 u16 rp_tag, u8 lp_tag)
2537{
2538 struct bfa_lun_mask_s *lun_list;
2539 u8 i;
2540
2541 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2542 return;
2543
2544 lun_list = bfa_get_lun_mask_list(bfa);
2545 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2546 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2547 if ((lun_list[i].lp_wwn == lp_wwn) &&
2548 (lun_list[i].rp_wwn == rp_wwn)) {
2549 lun_list[i].rp_tag = rp_tag;
2550 lun_list[i].lp_tag = lp_tag;
2551 }
2552 }
2553 }
2554}
2555
2556/*
2557 * set UA for all active luns in LM DB
2558 */
2559static void
2560bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2561{
2562 struct bfa_lun_mask_s *lunm_list;
2563 int i;
2564
2565 lunm_list = bfa_get_lun_mask_list(bfa);
2566 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2567 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2568 continue;
2569 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2570 }
2571}
2572
2573bfa_status_t
2574bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2575{
2576 struct bfa_lunmask_cfg_s *lun_mask;
2577
2578 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2579 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2580 return BFA_STATUS_FAILED;
2581
2582 if (bfa_get_lun_mask_status(bfa) == update)
2583 return BFA_STATUS_NO_CHANGE;
2584
2585 lun_mask = bfa_get_lun_mask(bfa);
2586 lun_mask->status = update;
2587
2588 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2589 bfa_ioim_lm_set_ua(bfa);
2590
2591 return bfa_dconf_update(bfa);
2592}
2593
2594bfa_status_t
2595bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2596{
2597 int i;
2598 struct bfa_lun_mask_s *lunm_list;
2599
2600 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2601 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2602 return BFA_STATUS_FAILED;
2603
2604 lunm_list = bfa_get_lun_mask_list(bfa);
2605 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2606 if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2607 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2608 bfa_rport_unset_lunmask(bfa,
2609 BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2610 }
2611 }
2612
2613 memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2614 return bfa_dconf_update(bfa);
2615}
2616
2617bfa_status_t
2618bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2619{
2620 struct bfa_lunmask_cfg_s *lun_mask;
2621
2622 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2623 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2624 return BFA_STATUS_FAILED;
2625
2626 lun_mask = bfa_get_lun_mask(bfa);
2627 memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2628 return BFA_STATUS_OK;
2629}
2630
2631bfa_status_t
2632bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2633 wwn_t rpwwn, struct scsi_lun lun)
2634{
2635 struct bfa_lun_mask_s *lunm_list;
2636 struct bfa_rport_s *rp = NULL;
2637 int i, free_index = MAX_LUN_MASK_CFG + 1;
2638 struct bfa_fcs_lport_s *port = NULL;
2639 struct bfa_fcs_rport_s *rp_fcs;
2640
2641 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2642 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2643 return BFA_STATUS_FAILED;
2644
2645 port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2646 vf_id, *pwwn);
2647 if (port) {
2648 *pwwn = port->port_cfg.pwwn;
2649 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2650 rp = rp_fcs->bfa_rport;
2651 }
2652
2653 lunm_list = bfa_get_lun_mask_list(bfa);
2654 /* if entry exists */
2655 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2656 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2657 free_index = i;
2658 if ((lunm_list[i].lp_wwn == *pwwn) &&
2659 (lunm_list[i].rp_wwn == rpwwn) &&
2660 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2661 scsilun_to_int((struct scsi_lun *)&lun)))
2662 return BFA_STATUS_ENTRY_EXISTS;
2663 }
2664
2665 if (free_index > MAX_LUN_MASK_CFG)
2666 return BFA_STATUS_MAX_ENTRY_REACHED;
2667
2668 if (rp) {
2669 lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2670 rp->rport_info.local_pid);
2671 lunm_list[free_index].rp_tag = rp->rport_tag;
2672 } else {
2673 lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2674 lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2675 }
2676
2677 lunm_list[free_index].lp_wwn = *pwwn;
2678 lunm_list[free_index].rp_wwn = rpwwn;
2679 lunm_list[free_index].lun = lun;
2680 lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2681
2682 /* set for all luns in this rp */
2683 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2684 if ((lunm_list[i].lp_wwn == *pwwn) &&
2685 (lunm_list[i].rp_wwn == rpwwn))
2686 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2687 }
2688
2689 return bfa_dconf_update(bfa);
2690}
2691
2692bfa_status_t
2693bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2694 wwn_t rpwwn, struct scsi_lun lun)
2695{
2696 struct bfa_lun_mask_s *lunm_list;
2697 struct bfa_rport_s *rp = NULL;
2698 struct bfa_fcs_lport_s *port = NULL;
2699 struct bfa_fcs_rport_s *rp_fcs;
2700 int i;
2701
2702 /* in min cfg lunm_list could be NULL but no commands should run. */
2703 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2704 return BFA_STATUS_FAILED;
2705
2706 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2707 bfa_trc(bfa, *pwwn);
2708 bfa_trc(bfa, rpwwn);
2709 bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2710
2711 if (*pwwn == 0) {
2712 port = bfa_fcs_lookup_port(
2713 &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2714 vf_id, *pwwn);
2715 if (port) {
2716 *pwwn = port->port_cfg.pwwn;
2717 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2718 rp = rp_fcs->bfa_rport;
2719 }
2720 }
2721
2722 lunm_list = bfa_get_lun_mask_list(bfa);
2723 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2724 if ((lunm_list[i].lp_wwn == *pwwn) &&
2725 (lunm_list[i].rp_wwn == rpwwn) &&
2726 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2727 scsilun_to_int((struct scsi_lun *)&lun))) {
2728 lunm_list[i].lp_wwn = 0;
2729 lunm_list[i].rp_wwn = 0;
2730 int_to_scsilun(0, &lunm_list[i].lun);
2731 lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2732 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2733 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2734 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2735 }
2736 return bfa_dconf_update(bfa);
2737 }
2738 }
2739
2740 /* set for all luns in this rp */
2741 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2742 if ((lunm_list[i].lp_wwn == *pwwn) &&
2743 (lunm_list[i].rp_wwn == rpwwn))
2744 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2745 }
2746
2747 return BFA_STATUS_ENTRY_NOT_EXISTS;
2748}
2749
2750static void
2071__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) 2751__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2072{ 2752{
2073 struct bfa_ioim_s *ioim = cbarg; 2753 struct bfa_ioim_s *ioim = cbarg;
@@ -2077,6 +2757,7 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2077 return; 2757 return;
2078 } 2758 }
2079 2759
2760 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2080 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, 2761 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2081 0, 0, NULL, 0); 2762 0, 0, NULL, 0);
2082} 2763}
@@ -2092,6 +2773,7 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2092 return; 2773 return;
2093 } 2774 }
2094 2775
2776 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2095 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, 2777 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2096 0, 0, NULL, 0); 2778 0, 0, NULL, 0);
2097} 2779}
@@ -2106,6 +2788,7 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2106 return; 2788 return;
2107 } 2789 }
2108 2790
2791 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2109 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); 2792 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2110} 2793}
2111 2794
@@ -2449,6 +3132,7 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2449 ioim->bfa = fcpim->bfa; 3132 ioim->bfa = fcpim->bfa;
2450 ioim->fcpim = fcpim; 3133 ioim->fcpim = fcpim;
2451 ioim->iosp = iosp; 3134 ioim->iosp = iosp;
3135 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2452 INIT_LIST_HEAD(&ioim->sgpg_q); 3136 INIT_LIST_HEAD(&ioim->sgpg_q);
2453 bfa_reqq_winit(&ioim->iosp->reqq_wait, 3137 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2454 bfa_ioim_qresume, ioim); 3138 bfa_ioim_qresume, ioim);
@@ -2486,6 +3170,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2486 evt = BFA_IOIM_SM_DONE; 3170 evt = BFA_IOIM_SM_DONE;
2487 else 3171 else
2488 evt = BFA_IOIM_SM_COMP; 3172 evt = BFA_IOIM_SM_COMP;
3173 ioim->proc_rsp_data(ioim);
2489 break; 3174 break;
2490 3175
2491 case BFI_IOIM_STS_TIMEDOUT: 3176 case BFI_IOIM_STS_TIMEDOUT:
@@ -2521,6 +3206,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2521 if (rsp->abort_tag != ioim->abort_tag) { 3206 if (rsp->abort_tag != ioim->abort_tag) {
2522 bfa_trc(ioim->bfa, rsp->abort_tag); 3207 bfa_trc(ioim->bfa, rsp->abort_tag);
2523 bfa_trc(ioim->bfa, ioim->abort_tag); 3208 bfa_trc(ioim->bfa, ioim->abort_tag);
3209 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2524 return; 3210 return;
2525 } 3211 }
2526 3212
@@ -2539,6 +3225,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2539 WARN_ON(1); 3225 WARN_ON(1);
2540 } 3226 }
2541 3227
3228 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2542 bfa_sm_send_event(ioim, evt); 3229 bfa_sm_send_event(ioim, evt);
2543} 3230}
2544 3231
@@ -2556,7 +3243,16 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2556 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag); 3243 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2557 3244
2558 bfa_ioim_cb_profile_comp(fcpim, ioim); 3245 bfa_ioim_cb_profile_comp(fcpim, ioim);
2559 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); 3246
3247 if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
3248 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3249 return;
3250 }
3251
3252 if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
3253 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3254 else
3255 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
2560} 3256}
2561 3257
2562/* 3258/*
@@ -2668,6 +3364,35 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
2668void 3364void
2669bfa_ioim_start(struct bfa_ioim_s *ioim) 3365bfa_ioim_start(struct bfa_ioim_s *ioim)
2670{ 3366{
3367 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
3368 struct bfa_lps_s *lps;
3369 enum bfa_ioim_lm_status status;
3370 struct scsi_lun scsilun;
3371
3372 if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
3373 lps = BFA_IOIM_TO_LPS(ioim);
3374 int_to_scsilun(cmnd->device->lun, &scsilun);
3375 status = bfa_ioim_lm_check(ioim, lps,
3376 ioim->itnim->rport, scsilun);
3377 if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
3378 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
3379 bfa_stats(ioim->itnim, lm_lun_not_rdy);
3380 return;
3381 }
3382
3383 if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
3384 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
3385 bfa_stats(ioim->itnim, lm_lun_not_sup);
3386 return;
3387 }
3388
3389 if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
3390 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
3391 bfa_stats(ioim->itnim, lm_rpl_data_changed);
3392 return;
3393 }
3394 }
3395
2671 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 3396 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2672 3397
2673 /* 3398 /*
@@ -3411,6 +4136,13 @@ bfa_fcp_detach(struct bfa_s *bfa)
3411static void 4136static void
3412bfa_fcp_start(struct bfa_s *bfa) 4137bfa_fcp_start(struct bfa_s *bfa)
3413{ 4138{
4139 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
4140
4141 /*
4142 * bfa_init() with flash read is complete. now invalidate the stale
4143 * content of lun mask like unit attention, rp tag and lp tag.
4144 */
4145 bfa_ioim_lm_init(fcp->bfa);
3414} 4146}
3415 4147
3416static void 4148static void
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 57b695ad4ee5..1080bcb81cb7 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -79,14 +79,22 @@ bfa_ioim_get_index(u32 n) {
79 if (n >= (1UL)<<22) 79 if (n >= (1UL)<<22)
80 return BFA_IOBUCKET_MAX - 1; 80 return BFA_IOBUCKET_MAX - 1;
81 n >>= 8; 81 n >>= 8;
82 if (n >= (1UL)<<16) 82 if (n >= (1UL)<<16) {
83 n >>= 16; pos += 16; 83 n >>= 16;
84 if (n >= 1 << 8) 84 pos += 16;
85 n >>= 8; pos += 8; 85 }
86 if (n >= 1 << 4) 86 if (n >= 1 << 8) {
87 n >>= 4; pos += 4; 87 n >>= 8;
88 if (n >= 1 << 2) 88 pos += 8;
89 n >>= 2; pos += 2; 89 }
90 if (n >= 1 << 4) {
91 n >>= 4;
92 pos += 4;
93 }
94 if (n >= 1 << 2) {
95 n >>= 2;
96 pos += 2;
97 }
90 if (n >= 1 << 1) 98 if (n >= 1 << 1)
91 pos += 1; 99 pos += 1;
92 100
@@ -102,6 +110,7 @@ struct bfad_ioim_s;
102struct bfad_tskim_s; 110struct bfad_tskim_s;
103 111
104typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim); 112typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
113typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
105 114
106struct bfa_fcpim_s { 115struct bfa_fcpim_s {
107 struct bfa_s *bfa; 116 struct bfa_s *bfa;
@@ -115,7 +124,7 @@ struct bfa_fcpim_s {
115 u32 path_tov; 124 u32 path_tov;
116 u16 q_depth; 125 u16 q_depth;
117 u8 reqq; /* Request queue to be used */ 126 u8 reqq; /* Request queue to be used */
118 u8 rsvd; 127 u8 lun_masking_pending;
119 struct list_head itnim_q; /* queue of active itnim */ 128 struct list_head itnim_q; /* queue of active itnim */
120 struct list_head ioim_resfree_q; /* IOs waiting for f/w */ 129 struct list_head ioim_resfree_q; /* IOs waiting for f/w */
121 struct list_head ioim_comp_q; /* IO global comp Q */ 130 struct list_head ioim_comp_q; /* IO global comp Q */
@@ -170,7 +179,9 @@ struct bfa_ioim_s {
170 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ 179 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
171 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ 180 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
172 u8 reqq; /* Request queue for I/O */ 181 u8 reqq; /* Request queue for I/O */
182 u8 mode; /* IO is passthrough or not */
173 u64 start_time; /* IO's Profile start val */ 183 u64 start_time; /* IO's Profile start val */
184 bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
174}; 185};
175 186
176struct bfa_ioim_sp_s { 187struct bfa_ioim_sp_s {
@@ -250,6 +261,10 @@ struct bfa_itnim_s {
250 (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \ 261 (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
251} while (0) 262} while (0)
252 263
264#define BFA_IOIM_TO_LPS(__ioim) \
265 BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
266 __ioim->itnim->rport->rport_info.lp_tag)
267
253static inline bfa_boolean_t 268static inline bfa_boolean_t
254bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim) 269bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
255{ 270{
@@ -297,6 +312,8 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
297 struct bfa_itnim_iostats_s *stats, u8 lp_tag); 312 struct bfa_itnim_iostats_s *stats, u8 lp_tag);
298void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats, 313void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
299 struct bfa_itnim_iostats_s *itnim_stats); 314 struct bfa_itnim_iostats_s *itnim_stats);
315bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
316bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
300 317
301#define bfa_fcpim_ioredirect_enabled(__bfa) \ 318#define bfa_fcpim_ioredirect_enabled(__bfa) \
302 (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect) 319 (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
@@ -397,4 +414,14 @@ void bfa_tskim_start(struct bfa_tskim_s *tskim,
397void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, 414void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
398 enum bfi_tskim_status tsk_status); 415 enum bfi_tskim_status tsk_status);
399 416
417void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
418 wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
419bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
420bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
421bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
422 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
423bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
424 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
425bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
426
400#endif /* __BFA_FCPIM_H__ */ 427#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index a9b22bc48bc3..eaac57e1ddec 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include "bfad_drv.h" 22#include "bfad_drv.h"
23#include "bfad_im.h"
23#include "bfa_fcs.h" 24#include "bfa_fcs.h"
24#include "bfa_fcbuild.h" 25#include "bfa_fcbuild.h"
25 26
@@ -1327,6 +1328,29 @@ bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1327 bfa_trc(fabric->fcs, status); 1328 bfa_trc(fabric->fcs, status);
1328} 1329}
1329 1330
1331
1332/*
1333 * Send AEN notification
1334 */
1335static void
1336bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
1337 enum bfa_port_aen_event event)
1338{
1339 struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
1340 struct bfa_aen_entry_s *aen_entry;
1341
1342 bfad_get_aen_entry(bfad, aen_entry);
1343 if (!aen_entry)
1344 return;
1345
1346 aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
1347 aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
1348
1349 /* Send the AEN notification */
1350 bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
1351 BFA_AEN_CAT_PORT, event);
1352}
1353
1330/* 1354/*
1331 * 1355 *
1332 * @param[in] fabric - fabric 1356 * @param[in] fabric - fabric
@@ -1358,6 +1382,8 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1358 BFA_LOG(KERN_WARNING, bfad, bfa_log_level, 1382 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1359 "Base port WWN = %s Fabric WWN = %s\n", 1383 "Base port WWN = %s Fabric WWN = %s\n",
1360 pwwn_ptr, fwwn_ptr); 1384 pwwn_ptr, fwwn_ptr);
1385 bfa_fcs_fabric_aen_post(&fabric->bport,
1386 BFA_PORT_AEN_FABRIC_NAME_CHANGE);
1361 } 1387 }
1362} 1388}
1363 1389
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index a5f1faf335a7..e75e07d25915 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -675,6 +675,7 @@ struct bfa_fcs_s {
675 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */ 675 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
676 struct bfa_fcs_stats_s stats; /* FCS statistics */ 676 struct bfa_fcs_stats_s stats; /* FCS statistics */
677 struct bfa_wc_s wc; /* waiting counter */ 677 struct bfa_wc_s wc; /* waiting counter */
678 int fcs_aen_seq;
678}; 679};
679 680
680/* 681/*
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 29b4108be269..9272840a2409 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -37,6 +37,8 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
37 struct bfa_fcxp_s *fcxp, void *cbarg, 37 struct bfa_fcxp_s *fcxp, void *cbarg,
38 bfa_status_t req_status, u32 rsp_len, 38 bfa_status_t req_status, u32 rsp_len,
39 u32 resid_len, struct fchs_s *rsp_fchs); 39 u32 resid_len, struct fchs_s *rsp_fchs);
40static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
41 enum bfa_itnim_aen_event event);
40 42
41/* 43/*
42 * fcs_itnim_sm FCS itnim state machine events 44 * fcs_itnim_sm FCS itnim state machine events
@@ -269,6 +271,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
269 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 271 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
270 "Target (WWN = %s) is online for initiator (WWN = %s)\n", 272 "Target (WWN = %s) is online for initiator (WWN = %s)\n",
271 rpwwn_buf, lpwwn_buf); 273 rpwwn_buf, lpwwn_buf);
274 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
272 break; 275 break;
273 276
274 case BFA_FCS_ITNIM_SM_OFFLINE: 277 case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -305,14 +308,17 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
305 bfa_itnim_offline(itnim->bfa_itnim); 308 bfa_itnim_offline(itnim->bfa_itnim);
306 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); 309 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
307 wwn2str(rpwwn_buf, itnim->rport->pwwn); 310 wwn2str(rpwwn_buf, itnim->rport->pwwn);
308 if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) 311 if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
309 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 312 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
310 "Target (WWN = %s) connectivity lost for " 313 "Target (WWN = %s) connectivity lost for "
311 "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); 314 "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
312 else 315 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
316 } else {
313 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 317 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
314 "Target (WWN = %s) offlined by initiator (WWN = %s)\n", 318 "Target (WWN = %s) offlined by initiator (WWN = %s)\n",
315 rpwwn_buf, lpwwn_buf); 319 rpwwn_buf, lpwwn_buf);
320 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
321 }
316 break; 322 break;
317 323
318 case BFA_FCS_ITNIM_SM_DELETE: 324 case BFA_FCS_ITNIM_SM_DELETE:
@@ -382,6 +388,33 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
382} 388}
383 389
384static void 390static void
391bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
392 enum bfa_itnim_aen_event event)
393{
394 struct bfa_fcs_rport_s *rport = itnim->rport;
395 struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
396 struct bfa_aen_entry_s *aen_entry;
397
398 /* Don't post events for well known addresses */
399 if (BFA_FCS_PID_IS_WKA(rport->pid))
400 return;
401
402 bfad_get_aen_entry(bfad, aen_entry);
403 if (!aen_entry)
404 return;
405
406 aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
407 aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
408 bfa_fcs_get_base_port(itnim->fcs));
409 aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
410 aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
411
412 /* Send the AEN notification */
413 bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
414 BFA_AEN_CAT_ITNIM, event);
415}
416
417static void
385bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) 418bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
386{ 419{
387 struct bfa_fcs_itnim_s *itnim = itnim_cbarg; 420 struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index f8251a91ba91..d4f951fe753e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfad_im.h"
19#include "bfa_fcs.h" 20#include "bfa_fcs.h"
20#include "bfa_fcbuild.h" 21#include "bfa_fcbuild.h"
21#include "bfa_fc.h" 22#include "bfa_fc.h"
@@ -300,6 +301,31 @@ bfa_fcs_lport_sm_deleting(
300 */ 301 */
301 302
302/* 303/*
304 * Send AEN notification
305 */
306static void
307bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
308 enum bfa_lport_aen_event event)
309{
310 struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
311 struct bfa_aen_entry_s *aen_entry;
312
313 bfad_get_aen_entry(bfad, aen_entry);
314 if (!aen_entry)
315 return;
316
317 aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
318 aen_entry->aen_data.lport.roles = port->port_cfg.roles;
319 aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
320 bfa_fcs_get_base_port(port->fcs));
321 aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
322
323 /* Send the AEN notification */
324 bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
325 BFA_AEN_CAT_LPORT, event);
326}
327
328/*
303 * Send a LS reject 329 * Send a LS reject
304 */ 330 */
305static void 331static void
@@ -593,6 +619,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
593 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 619 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
594 "Logical port online: WWN = %s Role = %s\n", 620 "Logical port online: WWN = %s Role = %s\n",
595 lpwwn_buf, "Initiator"); 621 lpwwn_buf, "Initiator");
622 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
596 623
597 bfad->bfad_flags |= BFAD_PORT_ONLINE; 624 bfad->bfad_flags |= BFAD_PORT_ONLINE;
598} 625}
@@ -611,14 +638,17 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
611 638
612 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 639 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
613 if (bfa_sm_cmp_state(port->fabric, 640 if (bfa_sm_cmp_state(port->fabric,
614 bfa_fcs_fabric_sm_online) == BFA_TRUE) 641 bfa_fcs_fabric_sm_online) == BFA_TRUE) {
615 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 642 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
616 "Logical port lost fabric connectivity: WWN = %s Role = %s\n", 643 "Logical port lost fabric connectivity: WWN = %s Role = %s\n",
617 lpwwn_buf, "Initiator"); 644 lpwwn_buf, "Initiator");
618 else 645 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
646 } else {
619 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 647 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
620 "Logical port taken offline: WWN = %s Role = %s\n", 648 "Logical port taken offline: WWN = %s Role = %s\n",
621 lpwwn_buf, "Initiator"); 649 lpwwn_buf, "Initiator");
650 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
651 }
622 652
623 list_for_each_safe(qe, qen, &port->rport_q) { 653 list_for_each_safe(qe, qen, &port->rport_q) {
624 rport = (struct bfa_fcs_rport_s *) qe; 654 rport = (struct bfa_fcs_rport_s *) qe;
@@ -676,6 +706,7 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
676 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 706 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
677 "Logical port deleted: WWN = %s Role = %s\n", 707 "Logical port deleted: WWN = %s Role = %s\n",
678 lpwwn_buf, "Initiator"); 708 lpwwn_buf, "Initiator");
709 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
679 710
680 /* Base port will be deleted by the OS driver */ 711 /* Base port will be deleted by the OS driver */
681 if (port->vport) { 712 if (port->vport) {
@@ -973,6 +1004,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
973 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 1004 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
974 "New logical port created: WWN = %s Role = %s\n", 1005 "New logical port created: WWN = %s Role = %s\n",
975 lpwwn_buf, "Initiator"); 1006 lpwwn_buf, "Initiator");
1007 bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
976 1008
977 bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit); 1009 bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
978 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 1010 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
@@ -5559,6 +5591,31 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
5559 * fcs_vport_private FCS virtual port private functions 5591 * fcs_vport_private FCS virtual port private functions
5560 */ 5592 */
5561/* 5593/*
5594 * Send AEN notification
5595 */
5596static void
5597bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
5598 enum bfa_lport_aen_event event)
5599{
5600 struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
5601 struct bfa_aen_entry_s *aen_entry;
5602
5603 bfad_get_aen_entry(bfad, aen_entry);
5604 if (!aen_entry)
5605 return;
5606
5607 aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
5608 aen_entry->aen_data.lport.roles = port->port_cfg.roles;
5609 aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
5610 bfa_fcs_get_base_port(port->fcs));
5611 aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
5612
5613 /* Send the AEN notification */
5614 bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
5615 BFA_AEN_CAT_LPORT, event);
5616}
5617
5618/*
5562 * This routine will be called to send a FDISC command. 5619 * This routine will be called to send a FDISC command.
5563 */ 5620 */
5564static void 5621static void
@@ -5585,8 +5642,11 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5585 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ 5642 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
5586 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) 5643 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5587 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); 5644 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5588 else 5645 else {
5646 bfa_fcs_vport_aen_post(&vport->lport,
5647 BFA_LPORT_AEN_NPIV_DUP_WWN);
5589 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN); 5648 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
5649 }
5590 break; 5650 break;
5591 5651
5592 case FC_LS_RJT_EXP_INSUFF_RES: 5652 case FC_LS_RJT_EXP_INSUFF_RES:
@@ -5596,11 +5656,17 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5596 */ 5656 */
5597 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) 5657 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5598 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); 5658 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5599 else 5659 else {
5660 bfa_fcs_vport_aen_post(&vport->lport,
5661 BFA_LPORT_AEN_NPIV_FABRIC_MAX);
5600 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); 5662 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
5663 }
5601 break; 5664 break;
5602 5665
5603 default: 5666 default:
5667 if (vport->fdisc_retries == 0)
5668 bfa_fcs_vport_aen_post(&vport->lport,
5669 BFA_LPORT_AEN_NPIV_UNKNOWN);
5604 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); 5670 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5605 } 5671 }
5606} 5672}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 2c514458a6b4..52628d5d3c9b 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include "bfad_drv.h" 22#include "bfad_drv.h"
23#include "bfad_im.h"
23#include "bfa_fcs.h" 24#include "bfa_fcs.h"
24#include "bfa_fcbuild.h" 25#include "bfa_fcbuild.h"
25 26
@@ -2041,6 +2042,35 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
2041} 2042}
2042 2043
2043static void 2044static void
2045bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
2046 enum bfa_rport_aen_event event,
2047 struct bfa_rport_aen_data_s *data)
2048{
2049 struct bfa_fcs_lport_s *port = rport->port;
2050 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
2051 struct bfa_aen_entry_s *aen_entry;
2052
2053 bfad_get_aen_entry(bfad, aen_entry);
2054 if (!aen_entry)
2055 return;
2056
2057 if (event == BFA_RPORT_AEN_QOS_PRIO)
2058 aen_entry->aen_data.rport.priv.qos = data->priv.qos;
2059 else if (event == BFA_RPORT_AEN_QOS_FLOWID)
2060 aen_entry->aen_data.rport.priv.qos = data->priv.qos;
2061
2062 aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
2063 aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
2064 bfa_fcs_get_base_port(rport->fcs));
2065 aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
2066 aen_entry->aen_data.rport.rpwwn = rport->pwwn;
2067
2068 /* Send the AEN notification */
2069 bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
2070 BFA_AEN_CAT_RPORT, event);
2071}
2072
2073static void
2044bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) 2074bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2045{ 2075{
2046 struct bfa_fcs_lport_s *port = rport->port; 2076 struct bfa_fcs_lport_s *port = rport->port;
@@ -2063,10 +2093,12 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2063 2093
2064 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 2094 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2065 wwn2str(rpwwn_buf, rport->pwwn); 2095 wwn2str(rpwwn_buf, rport->pwwn);
2066 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2096 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
2067 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2097 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2068 "Remote port (WWN = %s) online for logical port (WWN = %s)\n", 2098 "Remote port (WWN = %s) online for logical port (WWN = %s)\n",
2069 rpwwn_buf, lpwwn_buf); 2099 rpwwn_buf, lpwwn_buf);
2100 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
2101 }
2070} 2102}
2071 2103
2072static void 2104static void
@@ -2083,16 +2115,21 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2083 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 2115 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2084 wwn2str(rpwwn_buf, rport->pwwn); 2116 wwn2str(rpwwn_buf, rport->pwwn);
2085 if (!BFA_FCS_PID_IS_WKA(rport->pid)) { 2117 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
2086 if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) 2118 if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
2087 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2119 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2088 "Remote port (WWN = %s) connectivity lost for " 2120 "Remote port (WWN = %s) connectivity lost for "
2089 "logical port (WWN = %s)\n", 2121 "logical port (WWN = %s)\n",
2090 rpwwn_buf, lpwwn_buf); 2122 rpwwn_buf, lpwwn_buf);
2091 else 2123 bfa_fcs_rport_aen_post(rport,
2124 BFA_RPORT_AEN_DISCONNECT, NULL);
2125 } else {
2092 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2126 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2093 "Remote port (WWN = %s) offlined by " 2127 "Remote port (WWN = %s) offlined by "
2094 "logical port (WWN = %s)\n", 2128 "logical port (WWN = %s)\n",
2095 rpwwn_buf, lpwwn_buf); 2129 rpwwn_buf, lpwwn_buf);
2130 bfa_fcs_rport_aen_post(rport,
2131 BFA_RPORT_AEN_OFFLINE, NULL);
2132 }
2096 } 2133 }
2097 2134
2098 if (bfa_fcs_lport_is_initiator(port)) { 2135 if (bfa_fcs_lport_is_initiator(port)) {
@@ -2366,8 +2403,11 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
2366 struct bfa_rport_qos_attr_s new_qos_attr) 2403 struct bfa_rport_qos_attr_s new_qos_attr)
2367{ 2404{
2368 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; 2405 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2406 struct bfa_rport_aen_data_s aen_data;
2369 2407
2370 bfa_trc(rport->fcs, rport->pwwn); 2408 bfa_trc(rport->fcs, rport->pwwn);
2409 aen_data.priv.qos = new_qos_attr;
2410 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
2371} 2411}
2372 2412
2373/* 2413/*
@@ -2390,8 +2430,11 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
2390 struct bfa_rport_qos_attr_s new_qos_attr) 2430 struct bfa_rport_qos_attr_s new_qos_attr)
2391{ 2431{
2392 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; 2432 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2433 struct bfa_rport_aen_data_s aen_data;
2393 2434
2394 bfa_trc(rport->fcs, rport->pwwn); 2435 bfa_trc(rport->fcs, rport->pwwn);
2436 aen_data.priv.qos = new_qos_attr;
2437 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
2395} 2438}
2396 2439
2397/* 2440/*
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index e7ffd8205dc7..ea24d4c6e67a 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -42,11 +42,36 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
42 bfa->iocfc.bfa_regs.intr_status); 42 bfa->iocfc.bfa_regs.intr_status);
43} 43}
44 44
45/*
46 * Actions to respond RME Interrupt for Crossbow ASIC:
47 * - Write 1 to Interrupt Status register
48 * INTX - done in bfa_intx()
49 * MSIX - done in bfa_hwcb_rspq_ack_msix()
50 * - Update CI (only if new CI)
51 */
45static void 52static void
46bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq) 53bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
47{ 54{
48 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), 55 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
49 bfa->iocfc.bfa_regs.intr_status); 56 bfa->iocfc.bfa_regs.intr_status);
57
58 if (bfa_rspq_ci(bfa, rspq) == ci)
59 return;
60
61 bfa_rspq_ci(bfa, rspq) = ci;
62 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
63 mmiowb();
64}
65
66void
67bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
68{
69 if (bfa_rspq_ci(bfa, rspq) == ci)
70 return;
71
72 bfa_rspq_ci(bfa, rspq) = ci;
73 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
74 mmiowb();
50} 75}
51 76
52void 77void
@@ -149,8 +174,13 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
149void 174void
150bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) 175bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
151{ 176{
152 bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; 177 if (msix) {
153 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; 178 bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
179 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
180 } else {
181 bfa->iocfc.hwif.hw_reqq_ack = NULL;
182 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
183 }
154} 184}
155 185
156void 186void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 989bbce9b296..637527f48b40 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -64,13 +64,36 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
64 writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); 64 writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
65} 65}
66 66
67/*
68 * Actions to respond RME Interrupt for Catapult ASIC:
69 * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
70 * - Acknowledge by writing to RME Queue Control register
71 * - Update CI
72 */
67void 73void
68bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq) 74bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
69{ 75{
70 u32 r32; 76 u32 r32;
71 77
72 r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); 78 r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
73 writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); 79 writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
80
81 bfa_rspq_ci(bfa, rspq) = ci;
82 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
83 mmiowb();
84}
85
86/*
87 * Actions to respond RME Interrupt for Catapult2 ASIC:
88 * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
89 * - Update CI
90 */
91void
92bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
93{
94 bfa_rspq_ci(bfa, rspq) = ci;
95 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
96 mmiowb();
74} 97}
75 98
76void 99void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index d6c2bf3865d2..1ac5aecf25a6 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfad_im.h"
19#include "bfa_ioc.h" 20#include "bfa_ioc.h"
20#include "bfi_reg.h" 21#include "bfi_reg.h"
21#include "bfa_defs.h" 22#include "bfa_defs.h"
@@ -458,6 +459,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
458 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 459 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
459 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); 460 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
460 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); 461 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
462 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
461} 463}
462 464
463static void 465static void
@@ -502,6 +504,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
502 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 504 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
503 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); 505 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
504 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); 506 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
507 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
505} 508}
506 509
507/* 510/*
@@ -1966,6 +1969,7 @@ bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1966 1969
1967 BFA_LOG(KERN_CRIT, bfad, bfa_log_level, 1970 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1968 "Heart Beat of IOC has failed\n"); 1971 "Heart Beat of IOC has failed\n");
1972 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
1969 1973
1970} 1974}
1971 1975
@@ -1980,6 +1984,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1980 BFA_LOG(KERN_WARNING, bfad, bfa_log_level, 1984 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1981 "Running firmware version is incompatible " 1985 "Running firmware version is incompatible "
1982 "with the driver version\n"); 1986 "with the driver version\n");
1987 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
1983} 1988}
1984 1989
1985bfa_status_t 1990bfa_status_t
@@ -2679,6 +2684,43 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2679} 2684}
2680 2685
2681/* 2686/*
2687 * Send AEN notification
2688 */
2689void
2690bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2691{
2692 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2693 struct bfa_aen_entry_s *aen_entry;
2694 enum bfa_ioc_type_e ioc_type;
2695
2696 bfad_get_aen_entry(bfad, aen_entry);
2697 if (!aen_entry)
2698 return;
2699
2700 ioc_type = bfa_ioc_get_type(ioc);
2701 switch (ioc_type) {
2702 case BFA_IOC_TYPE_FC:
2703 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2704 break;
2705 case BFA_IOC_TYPE_FCoE:
2706 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2707 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2708 break;
2709 case BFA_IOC_TYPE_LL:
2710 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2711 break;
2712 default:
2713 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2714 break;
2715 }
2716
2717 /* Send the AEN notification */
2718 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2719 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2720 BFA_AEN_CAT_IOC, event);
2721}
2722
2723/*
2682 * Retrieve saved firmware trace from a prior IOC failure. 2724 * Retrieve saved firmware trace from a prior IOC failure.
2683 */ 2725 */
2684bfa_status_t 2726bfa_status_t
@@ -2879,6 +2921,10 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2879{ 2921{
2880 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) 2922 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2881 return; 2923 return;
2924 if (ioc->attr->nwwn == 0)
2925 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
2926 if (ioc->attr->pwwn == 0)
2927 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
2882} 2928}
2883 2929
2884/* 2930/*
@@ -3443,6 +3489,54 @@ bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3443} 3489}
3444 3490
3445/* 3491/*
3492 * SFP's State Change Notification post to AEN
3493 */
3494static void
3495bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3496{
3497 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3498 struct bfa_aen_entry_s *aen_entry;
3499 enum bfa_port_aen_event aen_evt = 0;
3500
3501 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3502 ((u64)rsp->event));
3503
3504 bfad_get_aen_entry(bfad, aen_entry);
3505 if (!aen_entry)
3506 return;
3507
3508 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3509 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3510 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3511
3512 switch (rsp->event) {
3513 case BFA_SFP_SCN_INSERTED:
3514 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3515 break;
3516 case BFA_SFP_SCN_REMOVED:
3517 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3518 break;
3519 case BFA_SFP_SCN_FAILED:
3520 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3521 break;
3522 case BFA_SFP_SCN_UNSUPPORT:
3523 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3524 break;
3525 case BFA_SFP_SCN_POM:
3526 aen_evt = BFA_PORT_AEN_SFP_POM;
3527 aen_entry->aen_data.port.level = rsp->pomlvl;
3528 break;
3529 default:
3530 bfa_trc(sfp, rsp->event);
3531 WARN_ON(1);
3532 }
3533
3534 /* Send the AEN notification */
3535 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3536 BFA_AEN_CAT_PORT, aen_evt);
3537}
3538
3539/*
3446 * SFP get data send 3540 * SFP get data send
3447 */ 3541 */
3448static void 3542static void
@@ -3482,6 +3576,50 @@ bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3482} 3576}
3483 3577
3484/* 3578/*
3579 * SFP scn handler
3580 */
3581static void
3582bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3583{
3584 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3585
3586 switch (rsp->event) {
3587 case BFA_SFP_SCN_INSERTED:
3588 sfp->state = BFA_SFP_STATE_INSERTED;
3589 sfp->data_valid = 0;
3590 bfa_sfp_scn_aen_post(sfp, rsp);
3591 break;
3592 case BFA_SFP_SCN_REMOVED:
3593 sfp->state = BFA_SFP_STATE_REMOVED;
3594 sfp->data_valid = 0;
3595 bfa_sfp_scn_aen_post(sfp, rsp);
3596 break;
3597 case BFA_SFP_SCN_FAILED:
3598 sfp->state = BFA_SFP_STATE_FAILED;
3599 sfp->data_valid = 0;
3600 bfa_sfp_scn_aen_post(sfp, rsp);
3601 break;
3602 case BFA_SFP_SCN_UNSUPPORT:
3603 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3604 bfa_sfp_scn_aen_post(sfp, rsp);
3605 if (!sfp->lock)
3606 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3607 break;
3608 case BFA_SFP_SCN_POM:
3609 bfa_sfp_scn_aen_post(sfp, rsp);
3610 break;
3611 case BFA_SFP_SCN_VALID:
3612 sfp->state = BFA_SFP_STATE_VALID;
3613 if (!sfp->lock)
3614 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3615 break;
3616 default:
3617 bfa_trc(sfp, rsp->event);
3618 WARN_ON(1);
3619 }
3620}
3621
3622/*
3485 * SFP show complete 3623 * SFP show complete
3486 */ 3624 */
3487static void 3625static void
@@ -3645,7 +3783,7 @@ bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3645 break; 3783 break;
3646 3784
3647 case BFI_SFP_I2H_SCN: 3785 case BFI_SFP_I2H_SCN:
3648 bfa_trc(sfp, msg->mh.msg_id); 3786 bfa_sfp_scn(sfp, msg);
3649 break; 3787 break;
3650 3788
3651 default: 3789 default:
@@ -3838,6 +3976,26 @@ bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3838 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ) 3976 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3839 3977
3840static void 3978static void
3979bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3980 int inst, int type)
3981{
3982 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3983 struct bfa_aen_entry_s *aen_entry;
3984
3985 bfad_get_aen_entry(bfad, aen_entry);
3986 if (!aen_entry)
3987 return;
3988
3989 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3990 aen_entry->aen_data.audit.partition_inst = inst;
3991 aen_entry->aen_data.audit.partition_type = type;
3992
3993 /* Send the AEN notification */
3994 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3995 BFA_AEN_CAT_AUDIT, event);
3996}
3997
3998static void
3841bfa_flash_cb(struct bfa_flash_s *flash) 3999bfa_flash_cb(struct bfa_flash_s *flash)
3842{ 4000{
3843 flash->op_busy = 0; 4001 flash->op_busy = 0;
@@ -3978,6 +4136,7 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
3978 struct bfi_flash_erase_rsp_s *erase; 4136 struct bfi_flash_erase_rsp_s *erase;
3979 struct bfi_flash_write_rsp_s *write; 4137 struct bfi_flash_write_rsp_s *write;
3980 struct bfi_flash_read_rsp_s *read; 4138 struct bfi_flash_read_rsp_s *read;
4139 struct bfi_flash_event_s *event;
3981 struct bfi_mbmsg_s *msg; 4140 struct bfi_mbmsg_s *msg;
3982 } m; 4141 } m;
3983 4142
@@ -4061,8 +4220,19 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4061 } 4220 }
4062 break; 4221 break;
4063 case BFI_FLASH_I2H_BOOT_VER_RSP: 4222 case BFI_FLASH_I2H_BOOT_VER_RSP:
4223 break;
4064 case BFI_FLASH_I2H_EVENT: 4224 case BFI_FLASH_I2H_EVENT:
4065 bfa_trc(flash, msg->mh.msg_id); 4225 status = be32_to_cpu(m.event->status);
4226 bfa_trc(flash, status);
4227 if (status == BFA_STATUS_BAD_FWCFG)
4228 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4229 else if (status == BFA_STATUS_INVALID_VENDOR) {
4230 u32 param;
4231 param = be32_to_cpu(m.event->param);
4232 bfa_trc(flash, param);
4233 bfa_ioc_aen_post(flash->ioc,
4234 BFA_IOC_AEN_INVALID_VENDOR);
4235 }
4066 break; 4236 break;
4067 4237
4068 default: 4238 default:
@@ -4204,6 +4374,8 @@ bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4204 flash->instance = instance; 4374 flash->instance = instance;
4205 4375
4206 bfa_flash_erase_send(flash); 4376 bfa_flash_erase_send(flash);
4377 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4378 instance, type);
4207 return BFA_STATUS_OK; 4379 return BFA_STATUS_OK;
4208} 4380}
4209 4381
@@ -5416,3 +5588,396 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5416 WARN_ON(1); 5588 WARN_ON(1);
5417 } 5589 }
5418} 5590}
5591
5592/*
5593 * DCONF module specific
5594 */
5595
5596BFA_MODULE(dconf);
5597
5598/*
5599 * DCONF state machine events
5600 */
5601enum bfa_dconf_event {
5602 BFA_DCONF_SM_INIT = 1, /* dconf Init */
5603 BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
5604 BFA_DCONF_SM_WR = 3, /* binding change, map */
5605 BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
5606 BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
5607 BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
5608};
5609
5610/* forward declaration of DCONF state machine */
5611static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5612 enum bfa_dconf_event event);
5613static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5614 enum bfa_dconf_event event);
5615static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5616 enum bfa_dconf_event event);
5617static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5618 enum bfa_dconf_event event);
5619static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5620 enum bfa_dconf_event event);
5621static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5622 enum bfa_dconf_event event);
5623static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5624 enum bfa_dconf_event event);
5625
5626static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5627static void bfa_dconf_timer(void *cbarg);
5628static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5629static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5630
5631/*
5632 * Begining state of dconf module. Waiting for an event to start.
5633 */
5634static void
5635bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5636{
5637 bfa_status_t bfa_status;
5638 bfa_trc(dconf->bfa, event);
5639
5640 switch (event) {
5641 case BFA_DCONF_SM_INIT:
5642 if (dconf->min_cfg) {
5643 bfa_trc(dconf->bfa, dconf->min_cfg);
5644 return;
5645 }
5646 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5647 dconf->flashdone = BFA_FALSE;
5648 bfa_trc(dconf->bfa, dconf->flashdone);
5649 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5650 BFA_FLASH_PART_DRV, dconf->instance,
5651 dconf->dconf,
5652 sizeof(struct bfa_dconf_s), 0,
5653 bfa_dconf_init_cb, dconf->bfa);
5654 if (bfa_status != BFA_STATUS_OK) {
5655 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5656 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5657 return;
5658 }
5659 break;
5660 case BFA_DCONF_SM_EXIT:
5661 dconf->flashdone = BFA_TRUE;
5662 case BFA_DCONF_SM_IOCDISABLE:
5663 case BFA_DCONF_SM_WR:
5664 case BFA_DCONF_SM_FLASH_COMP:
5665 break;
5666 default:
5667 bfa_sm_fault(dconf->bfa, event);
5668 }
5669}
5670
5671/*
5672 * Read flash for dconf entries and make a call back to the driver once done.
5673 */
5674static void
5675bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5676 enum bfa_dconf_event event)
5677{
5678 bfa_trc(dconf->bfa, event);
5679
5680 switch (event) {
5681 case BFA_DCONF_SM_FLASH_COMP:
5682 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5683 break;
5684 case BFA_DCONF_SM_TIMEOUT:
5685 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5686 break;
5687 case BFA_DCONF_SM_EXIT:
5688 dconf->flashdone = BFA_TRUE;
5689 bfa_trc(dconf->bfa, dconf->flashdone);
5690 case BFA_DCONF_SM_IOCDISABLE:
5691 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5692 break;
5693 default:
5694 bfa_sm_fault(dconf->bfa, event);
5695 }
5696}
5697
5698/*
5699 * DCONF Module is in ready state. Has completed the initialization.
5700 */
5701static void
5702bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5703{
5704 bfa_trc(dconf->bfa, event);
5705
5706 switch (event) {
5707 case BFA_DCONF_SM_WR:
5708 bfa_timer_start(dconf->bfa, &dconf->timer,
5709 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5710 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5711 break;
5712 case BFA_DCONF_SM_EXIT:
5713 dconf->flashdone = BFA_TRUE;
5714 bfa_trc(dconf->bfa, dconf->flashdone);
5715 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5716 break;
5717 case BFA_DCONF_SM_INIT:
5718 case BFA_DCONF_SM_IOCDISABLE:
5719 break;
5720 default:
5721 bfa_sm_fault(dconf->bfa, event);
5722 }
5723}
5724
5725/*
5726 * entries are dirty, write back to the flash.
5727 */
5728
5729static void
5730bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5731{
5732 bfa_trc(dconf->bfa, event);
5733
5734 switch (event) {
5735 case BFA_DCONF_SM_TIMEOUT:
5736 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5737 bfa_dconf_flash_write(dconf);
5738 break;
5739 case BFA_DCONF_SM_WR:
5740 bfa_timer_stop(&dconf->timer);
5741 bfa_timer_start(dconf->bfa, &dconf->timer,
5742 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5743 break;
5744 case BFA_DCONF_SM_EXIT:
5745 bfa_timer_stop(&dconf->timer);
5746 bfa_timer_start(dconf->bfa, &dconf->timer,
5747 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5748 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5749 bfa_dconf_flash_write(dconf);
5750 break;
5751 case BFA_DCONF_SM_FLASH_COMP:
5752 break;
5753 case BFA_DCONF_SM_IOCDISABLE:
5754 bfa_timer_stop(&dconf->timer);
5755 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5756 break;
5757 default:
5758 bfa_sm_fault(dconf->bfa, event);
5759 }
5760}
5761
5762/*
5763 * Sync the dconf entries to the flash.
5764 */
5765static void
5766bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5767 enum bfa_dconf_event event)
5768{
5769 bfa_trc(dconf->bfa, event);
5770
5771 switch (event) {
5772 case BFA_DCONF_SM_IOCDISABLE:
5773 case BFA_DCONF_SM_FLASH_COMP:
5774 bfa_timer_stop(&dconf->timer);
5775 case BFA_DCONF_SM_TIMEOUT:
5776 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5777 dconf->flashdone = BFA_TRUE;
5778 bfa_trc(dconf->bfa, dconf->flashdone);
5779 bfa_ioc_disable(&dconf->bfa->ioc);
5780 break;
5781 default:
5782 bfa_sm_fault(dconf->bfa, event);
5783 }
5784}
5785
5786static void
5787bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5788{
5789 bfa_trc(dconf->bfa, event);
5790
5791 switch (event) {
5792 case BFA_DCONF_SM_FLASH_COMP:
5793 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5794 break;
5795 case BFA_DCONF_SM_WR:
5796 bfa_timer_start(dconf->bfa, &dconf->timer,
5797 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5798 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5799 break;
5800 case BFA_DCONF_SM_EXIT:
5801 bfa_timer_start(dconf->bfa, &dconf->timer,
5802 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5803 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5804 break;
5805 case BFA_DCONF_SM_IOCDISABLE:
5806 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5807 break;
5808 default:
5809 bfa_sm_fault(dconf->bfa, event);
5810 }
5811}
5812
5813static void
5814bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5815 enum bfa_dconf_event event)
5816{
5817 bfa_trc(dconf->bfa, event);
5818
5819 switch (event) {
5820 case BFA_DCONF_SM_INIT:
5821 bfa_timer_start(dconf->bfa, &dconf->timer,
5822 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5823 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5824 break;
5825 case BFA_DCONF_SM_EXIT:
5826 dconf->flashdone = BFA_TRUE;
5827 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5828 break;
5829 case BFA_DCONF_SM_IOCDISABLE:
5830 break;
5831 default:
5832 bfa_sm_fault(dconf->bfa, event);
5833 }
5834}
5835
5836/*
5837 * Compute and return memory needed by DRV_CFG module.
5838 */
5839static void
5840bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5841 struct bfa_s *bfa)
5842{
5843 struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5844
5845 if (cfg->drvcfg.min_cfg)
5846 bfa_mem_kva_setup(meminfo, dconf_kva,
5847 sizeof(struct bfa_dconf_hdr_s));
5848 else
5849 bfa_mem_kva_setup(meminfo, dconf_kva,
5850 sizeof(struct bfa_dconf_s));
5851}
5852
5853static void
5854bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5855 struct bfa_pcidev_s *pcidev)
5856{
5857 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5858
5859 dconf->bfad = bfad;
5860 dconf->bfa = bfa;
5861 dconf->instance = bfa->ioc.port_id;
5862 bfa_trc(bfa, dconf->instance);
5863
5864 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5865 if (cfg->drvcfg.min_cfg) {
5866 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5867 dconf->min_cfg = BFA_TRUE;
5868 /*
5869 * Set the flashdone flag to TRUE explicitly as no flash
5870 * write will happen in min_cfg mode.
5871 */
5872 dconf->flashdone = BFA_TRUE;
5873 } else {
5874 dconf->min_cfg = BFA_FALSE;
5875 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5876 }
5877
5878 bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5879 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5880}
5881
5882static void
5883bfa_dconf_init_cb(void *arg, bfa_status_t status)
5884{
5885 struct bfa_s *bfa = arg;
5886 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5887
5888 dconf->flashdone = BFA_TRUE;
5889 bfa_trc(bfa, dconf->flashdone);
5890 bfa_iocfc_cb_dconf_modinit(bfa, status);
5891 if (status == BFA_STATUS_OK) {
5892 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5893 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5894 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5895 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5896 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5897 }
5898 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5899}
5900
5901void
5902bfa_dconf_modinit(struct bfa_s *bfa)
5903{
5904 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5905 bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5906}
5907static void
5908bfa_dconf_start(struct bfa_s *bfa)
5909{
5910}
5911
5912static void
5913bfa_dconf_stop(struct bfa_s *bfa)
5914{
5915}
5916
5917static void bfa_dconf_timer(void *cbarg)
5918{
5919 struct bfa_dconf_mod_s *dconf = cbarg;
5920 bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5921}
5922static void
5923bfa_dconf_iocdisable(struct bfa_s *bfa)
5924{
5925 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5926 bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5927}
5928
5929static void
5930bfa_dconf_detach(struct bfa_s *bfa)
5931{
5932}
5933
5934static bfa_status_t
5935bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5936{
5937 bfa_status_t bfa_status;
5938 bfa_trc(dconf->bfa, 0);
5939
5940 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5941 BFA_FLASH_PART_DRV, dconf->instance,
5942 dconf->dconf, sizeof(struct bfa_dconf_s), 0,
5943 bfa_dconf_cbfn, dconf);
5944 if (bfa_status != BFA_STATUS_OK)
5945 WARN_ON(bfa_status);
5946 bfa_trc(dconf->bfa, bfa_status);
5947
5948 return bfa_status;
5949}
5950
5951bfa_status_t
5952bfa_dconf_update(struct bfa_s *bfa)
5953{
5954 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5955 bfa_trc(dconf->bfa, 0);
5956 if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5957 return BFA_STATUS_FAILED;
5958
5959 if (dconf->min_cfg) {
5960 bfa_trc(dconf->bfa, dconf->min_cfg);
5961 return BFA_STATUS_FAILED;
5962 }
5963
5964 bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5965 return BFA_STATUS_OK;
5966}
5967
5968static void
5969bfa_dconf_cbfn(void *arg, bfa_status_t status)
5970{
5971 struct bfa_dconf_mod_s *dconf = arg;
5972 WARN_ON(status);
5973 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5974}
5975
5976void
5977bfa_dconf_modexit(struct bfa_s *bfa)
5978{
5979 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5980 BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
5981 bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
5982 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5983}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index c5ecd2edc95d..546d46b37101 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -327,6 +327,7 @@ struct bfa_ioc_s {
327 enum bfa_mode_s port_mode; 327 enum bfa_mode_s port_mode;
328 u8 ad_cap_bm; /* adapter cap bit mask */ 328 u8 ad_cap_bm; /* adapter cap bit mask */
329 u8 port_mode_cfg; /* config port mode */ 329 u8 port_mode_cfg; /* config port mode */
330 int ioc_aen_seq;
330}; 331};
331 332
332struct bfa_ioc_hwif_s { 333struct bfa_ioc_hwif_s {
@@ -366,6 +367,8 @@ struct bfa_cb_qe_s {
366 struct list_head qe; 367 struct list_head qe;
367 bfa_cb_cbfn_t cbfn; 368 bfa_cb_cbfn_t cbfn;
368 bfa_boolean_t once; 369 bfa_boolean_t once;
370 bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
371 bfa_status_t fw_status; /* to access fw status in comp proc */
369 void *cbarg; 372 void *cbarg;
370}; 373};
371 374
@@ -658,7 +661,6 @@ struct bfa_phy_s {
658 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ 661 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
659 struct bfa_mem_dma_s phy_dma; 662 struct bfa_mem_dma_s phy_dma;
660}; 663};
661
662#define BFA_PHY(__bfa) (&(__bfa)->modules.phy) 664#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
663#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma)) 665#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
664 666
@@ -684,6 +686,49 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy,
684void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); 686void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
685 687
686/* 688/*
689 * Driver Config( dconf) specific
690 */
691#define BFI_DCONF_SIGNATURE 0xabcdabcd
692#define BFI_DCONF_VERSION 1
693
694#pragma pack(1)
695struct bfa_dconf_hdr_s {
696 u32 signature;
697 u32 version;
698};
699
700struct bfa_dconf_s {
701 struct bfa_dconf_hdr_s hdr;
702 struct bfa_lunmask_cfg_s lun_mask;
703};
704#pragma pack()
705
706struct bfa_dconf_mod_s {
707 bfa_sm_t sm;
708 u8 instance;
709 bfa_boolean_t flashdone;
710 bfa_boolean_t read_data_valid;
711 bfa_boolean_t min_cfg;
712 struct bfa_timer_s timer;
713 struct bfa_s *bfa;
714 void *bfad;
715 void *trcmod;
716 struct bfa_dconf_s *dconf;
717 struct bfa_mem_kva_s kva_seg;
718};
719
720#define BFA_DCONF_MOD(__bfa) \
721 (&(__bfa)->modules.dconf_mod)
722#define BFA_MEM_DCONF_KVA(__bfa) (&(BFA_DCONF_MOD(__bfa)->kva_seg))
723#define bfa_dconf_read_data_valid(__bfa) \
724 (BFA_DCONF_MOD(__bfa)->read_data_valid)
725#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */
726
727void bfa_dconf_modinit(struct bfa_s *bfa);
728void bfa_dconf_modexit(struct bfa_s *bfa);
729bfa_status_t bfa_dconf_update(struct bfa_s *bfa);
730
731/*
687 * IOC specfic macros 732 * IOC specfic macros
688 */ 733 */
689#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) 734#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -803,6 +848,7 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
803 struct bfi_ioc_image_hdr_s *fwhdr); 848 struct bfi_ioc_image_hdr_s *fwhdr);
804bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, 849bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
805 struct bfi_ioc_image_hdr_s *fwhdr); 850 struct bfi_ioc_image_hdr_s *fwhdr);
851void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
806bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats); 852bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
807bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc); 853bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
808 854
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 1c6efd40a673..2d36e4823835 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -44,6 +44,7 @@ struct bfa_modules_s {
44 struct bfa_flash_s flash; /* flash module */ 44 struct bfa_flash_s flash; /* flash module */
45 struct bfa_diag_s diag_mod; /* diagnostics module */ 45 struct bfa_diag_s diag_mod; /* diagnostics module */
46 struct bfa_phy_s phy; /* phy module */ 46 struct bfa_phy_s phy; /* phy module */
47 struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */
47}; 48};
48 49
49/* 50/*
@@ -119,6 +120,7 @@ struct bfa_s {
119 struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; 120 struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
120 bfa_boolean_t fcs; /* FCS is attached to BFA */ 121 bfa_boolean_t fcs; /* FCS is attached to BFA */
121 struct bfa_msix_s msix; 122 struct bfa_msix_s msix;
123 int bfa_aen_seq;
122}; 124};
123 125
124extern bfa_boolean_t bfa_auto_recover; 126extern bfa_boolean_t bfa_auto_recover;
@@ -130,5 +132,6 @@ extern struct bfa_module_s hal_mod_lps;
130extern struct bfa_module_s hal_mod_uf; 132extern struct bfa_module_s hal_mod_uf;
131extern struct bfa_module_s hal_mod_rport; 133extern struct bfa_module_s hal_mod_rport;
132extern struct bfa_module_s hal_mod_fcp; 134extern struct bfa_module_s hal_mod_fcp;
135extern struct bfa_module_s hal_mod_dconf;
133 136
134#endif /* __BFA_MODULES_H__ */ 137#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 21caaefce99f..aa8a0eaf91f9 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfad_im.h"
19#include "bfa_plog.h" 20#include "bfa_plog.h"
20#include "bfa_cs.h" 21#include "bfa_cs.h"
21#include "bfa_modules.h" 22#include "bfa_modules.h"
@@ -2007,6 +2008,24 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2007 } 2008 }
2008} 2009}
2009 2010
2011static void
2012bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
2013{
2014 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2015 struct bfa_aen_entry_s *aen_entry;
2016
2017 bfad_get_aen_entry(bfad, aen_entry);
2018 if (!aen_entry)
2019 return;
2020
2021 aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
2022 aen_entry->aen_data.port.pwwn = fcport->pwwn;
2023
2024 /* Send the AEN notification */
2025 bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
2026 BFA_AEN_CAT_PORT, event);
2027}
2028
2010/* 2029/*
2011 * FC PORT state machine functions 2030 * FC PORT state machine functions
2012 */ 2031 */
@@ -2095,6 +2114,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2095 wwn2str(pwwn_buf, fcport->pwwn); 2114 wwn2str(pwwn_buf, fcport->pwwn);
2096 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2115 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2097 "Base port disabled: WWN = %s\n", pwwn_buf); 2116 "Base port disabled: WWN = %s\n", pwwn_buf);
2117 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2098 break; 2118 break;
2099 2119
2100 case BFA_FCPORT_SM_LINKUP: 2120 case BFA_FCPORT_SM_LINKUP:
@@ -2155,6 +2175,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2155 wwn2str(pwwn_buf, fcport->pwwn); 2175 wwn2str(pwwn_buf, fcport->pwwn);
2156 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2176 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2157 "Base port disabled: WWN = %s\n", pwwn_buf); 2177 "Base port disabled: WWN = %s\n", pwwn_buf);
2178 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2158 break; 2179 break;
2159 2180
2160 case BFA_FCPORT_SM_STOP: 2181 case BFA_FCPORT_SM_STOP:
@@ -2208,6 +2229,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2208 wwn2str(pwwn_buf, fcport->pwwn); 2229 wwn2str(pwwn_buf, fcport->pwwn);
2209 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2230 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2210 "Base port online: WWN = %s\n", pwwn_buf); 2231 "Base port online: WWN = %s\n", pwwn_buf);
2232 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
2233
2234 /* If QoS is enabled and it is not online, send AEN */
2235 if (fcport->cfg.qos_enabled &&
2236 fcport->qos_attr.state != BFA_QOS_ONLINE)
2237 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
2211 break; 2238 break;
2212 2239
2213 case BFA_FCPORT_SM_LINKDOWN: 2240 case BFA_FCPORT_SM_LINKDOWN:
@@ -2234,6 +2261,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2234 wwn2str(pwwn_buf, fcport->pwwn); 2261 wwn2str(pwwn_buf, fcport->pwwn);
2235 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2262 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2236 "Base port disabled: WWN = %s\n", pwwn_buf); 2263 "Base port disabled: WWN = %s\n", pwwn_buf);
2264 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2237 break; 2265 break;
2238 2266
2239 case BFA_FCPORT_SM_STOP: 2267 case BFA_FCPORT_SM_STOP:
@@ -2279,8 +2307,10 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2279 wwn2str(pwwn_buf, fcport->pwwn); 2307 wwn2str(pwwn_buf, fcport->pwwn);
2280 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2308 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2281 "Base port offline: WWN = %s\n", pwwn_buf); 2309 "Base port offline: WWN = %s\n", pwwn_buf);
2310 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2282 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2311 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2283 "Base port disabled: WWN = %s\n", pwwn_buf); 2312 "Base port disabled: WWN = %s\n", pwwn_buf);
2313 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2284 break; 2314 break;
2285 2315
2286 case BFA_FCPORT_SM_LINKDOWN: 2316 case BFA_FCPORT_SM_LINKDOWN:
@@ -2290,26 +2320,32 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2290 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2320 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2291 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); 2321 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2292 wwn2str(pwwn_buf, fcport->pwwn); 2322 wwn2str(pwwn_buf, fcport->pwwn);
2293 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2323 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2294 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2324 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2295 "Base port offline: WWN = %s\n", pwwn_buf); 2325 "Base port offline: WWN = %s\n", pwwn_buf);
2296 else 2326 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2327 } else {
2297 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2328 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2298 "Base port (WWN = %s) " 2329 "Base port (WWN = %s) "
2299 "lost fabric connectivity\n", pwwn_buf); 2330 "lost fabric connectivity\n", pwwn_buf);
2331 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2332 }
2300 break; 2333 break;
2301 2334
2302 case BFA_FCPORT_SM_STOP: 2335 case BFA_FCPORT_SM_STOP:
2303 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2336 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2304 bfa_fcport_reset_linkinfo(fcport); 2337 bfa_fcport_reset_linkinfo(fcport);
2305 wwn2str(pwwn_buf, fcport->pwwn); 2338 wwn2str(pwwn_buf, fcport->pwwn);
2306 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2339 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2307 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2340 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2308 "Base port offline: WWN = %s\n", pwwn_buf); 2341 "Base port offline: WWN = %s\n", pwwn_buf);
2309 else 2342 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2343 } else {
2310 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2344 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2311 "Base port (WWN = %s) " 2345 "Base port (WWN = %s) "
2312 "lost fabric connectivity\n", pwwn_buf); 2346 "lost fabric connectivity\n", pwwn_buf);
2347 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2348 }
2313 break; 2349 break;
2314 2350
2315 case BFA_FCPORT_SM_HWFAIL: 2351 case BFA_FCPORT_SM_HWFAIL:
@@ -2317,13 +2353,16 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2317 bfa_fcport_reset_linkinfo(fcport); 2353 bfa_fcport_reset_linkinfo(fcport);
2318 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); 2354 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2319 wwn2str(pwwn_buf, fcport->pwwn); 2355 wwn2str(pwwn_buf, fcport->pwwn);
2320 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2356 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2321 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2357 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2322 "Base port offline: WWN = %s\n", pwwn_buf); 2358 "Base port offline: WWN = %s\n", pwwn_buf);
2323 else 2359 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2360 } else {
2324 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2361 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2325 "Base port (WWN = %s) " 2362 "Base port (WWN = %s) "
2326 "lost fabric connectivity\n", pwwn_buf); 2363 "lost fabric connectivity\n", pwwn_buf);
2364 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2365 }
2327 break; 2366 break;
2328 2367
2329 default: 2368 default:
@@ -2454,6 +2493,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2454 wwn2str(pwwn_buf, fcport->pwwn); 2493 wwn2str(pwwn_buf, fcport->pwwn);
2455 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2494 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2456 "Base port enabled: WWN = %s\n", pwwn_buf); 2495 "Base port enabled: WWN = %s\n", pwwn_buf);
2496 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2457 break; 2497 break;
2458 2498
2459 case BFA_FCPORT_SM_STOP: 2499 case BFA_FCPORT_SM_STOP:
@@ -2508,6 +2548,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2508 wwn2str(pwwn_buf, fcport->pwwn); 2548 wwn2str(pwwn_buf, fcport->pwwn);
2509 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2549 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2510 "Base port enabled: WWN = %s\n", pwwn_buf); 2550 "Base port enabled: WWN = %s\n", pwwn_buf);
2551 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2511 break; 2552 break;
2512 2553
2513 case BFA_FCPORT_SM_DISABLE: 2554 case BFA_FCPORT_SM_DISABLE:
@@ -2874,6 +2915,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2874 2915
2875 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; 2916 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2876 2917
2918 INIT_LIST_HEAD(&fcport->stats_pending_q);
2919 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
2920
2877 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); 2921 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
2878} 2922}
2879 2923
@@ -3102,30 +3146,38 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3102static void 3146static void
3103__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) 3147__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3104{ 3148{
3105 struct bfa_fcport_s *fcport = cbarg; 3149 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
3150 struct bfa_cb_pending_q_s *cb;
3151 struct list_head *qe, *qen;
3152 union bfa_fcport_stats_u *ret;
3106 3153
3107 if (complete) { 3154 if (complete) {
3108 if (fcport->stats_status == BFA_STATUS_OK) { 3155 struct timeval tv;
3109 struct timeval tv; 3156 if (fcport->stats_status == BFA_STATUS_OK)
3110 3157 do_gettimeofday(&tv);
3111 /* Swap FC QoS or FCoE stats */ 3158
3112 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 3159 list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
3113 bfa_fcport_qos_stats_swap( 3160 bfa_q_deq(&fcport->stats_pending_q, &qe);
3114 &fcport->stats_ret->fcqos, 3161 cb = (struct bfa_cb_pending_q_s *)qe;
3115 &fcport->stats->fcqos); 3162 if (fcport->stats_status == BFA_STATUS_OK) {
3116 } else { 3163 ret = (union bfa_fcport_stats_u *)cb->data;
3117 bfa_fcport_fcoe_stats_swap( 3164 /* Swap FC QoS or FCoE stats */
3118 &fcport->stats_ret->fcoe, 3165 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
3119 &fcport->stats->fcoe); 3166 bfa_fcport_qos_stats_swap(&ret->fcqos,
3120 3167 &fcport->stats->fcqos);
3121 do_gettimeofday(&tv); 3168 else {
3122 fcport->stats_ret->fcoe.secs_reset = 3169 bfa_fcport_fcoe_stats_swap(&ret->fcoe,
3170 &fcport->stats->fcoe);
3171 ret->fcoe.secs_reset =
3123 tv.tv_sec - fcport->stats_reset_time; 3172 tv.tv_sec - fcport->stats_reset_time;
3173 }
3124 } 3174 }
3175 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3176 fcport->stats_status);
3125 } 3177 }
3126 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 3178 fcport->stats_status = BFA_STATUS_OK;
3127 } else { 3179 } else {
3128 fcport->stats_busy = BFA_FALSE; 3180 INIT_LIST_HEAD(&fcport->stats_pending_q);
3129 fcport->stats_status = BFA_STATUS_OK; 3181 fcport->stats_status = BFA_STATUS_OK;
3130 } 3182 }
3131} 3183}
@@ -3143,8 +3195,7 @@ bfa_fcport_stats_get_timeout(void *cbarg)
3143 } 3195 }
3144 3196
3145 fcport->stats_status = BFA_STATUS_ETIMER; 3197 fcport->stats_status = BFA_STATUS_ETIMER;
3146 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get, 3198 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3147 fcport);
3148} 3199}
3149 3200
3150static void 3201static void
@@ -3174,7 +3225,9 @@ bfa_fcport_send_stats_get(void *cbarg)
3174static void 3225static void
3175__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) 3226__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3176{ 3227{
3177 struct bfa_fcport_s *fcport = cbarg; 3228 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3229 struct bfa_cb_pending_q_s *cb;
3230 struct list_head *qe, *qen;
3178 3231
3179 if (complete) { 3232 if (complete) {
3180 struct timeval tv; 3233 struct timeval tv;
@@ -3184,10 +3237,15 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3184 */ 3237 */
3185 do_gettimeofday(&tv); 3238 do_gettimeofday(&tv);
3186 fcport->stats_reset_time = tv.tv_sec; 3239 fcport->stats_reset_time = tv.tv_sec;
3187 3240 list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
3188 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 3241 bfa_q_deq(&fcport->statsclr_pending_q, &qe);
3242 cb = (struct bfa_cb_pending_q_s *)qe;
3243 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3244 fcport->stats_status);
3245 }
3246 fcport->stats_status = BFA_STATUS_OK;
3189 } else { 3247 } else {
3190 fcport->stats_busy = BFA_FALSE; 3248 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3191 fcport->stats_status = BFA_STATUS_OK; 3249 fcport->stats_status = BFA_STATUS_OK;
3192 } 3250 }
3193} 3251}
@@ -3205,8 +3263,7 @@ bfa_fcport_stats_clr_timeout(void *cbarg)
3205 } 3263 }
3206 3264
3207 fcport->stats_status = BFA_STATUS_ETIMER; 3265 fcport->stats_status = BFA_STATUS_ETIMER;
3208 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 3266 __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3209 __bfa_cb_fcport_stats_clr, fcport);
3210} 3267}
3211 3268
3212static void 3269static void
@@ -3402,6 +3459,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3402 fcport->use_flash_cfg = BFA_FALSE; 3459 fcport->use_flash_cfg = BFA_FALSE;
3403 } 3460 }
3404 3461
3462 if (fcport->cfg.qos_enabled)
3463 fcport->qos_attr.state = BFA_QOS_OFFLINE;
3464 else
3465 fcport->qos_attr.state = BFA_QOS_DISABLED;
3466
3405 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); 3467 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3406 } 3468 }
3407 break; 3469 break;
@@ -3426,28 +3488,26 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3426 /* 3488 /*
3427 * check for timer pop before processing the rsp 3489 * check for timer pop before processing the rsp
3428 */ 3490 */
3429 if (fcport->stats_busy == BFA_FALSE || 3491 if (list_empty(&fcport->stats_pending_q) ||
3430 fcport->stats_status == BFA_STATUS_ETIMER) 3492 (fcport->stats_status == BFA_STATUS_ETIMER))
3431 break; 3493 break;
3432 3494
3433 bfa_timer_stop(&fcport->timer); 3495 bfa_timer_stop(&fcport->timer);
3434 fcport->stats_status = i2hmsg.pstatsget_rsp->status; 3496 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3435 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 3497 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3436 __bfa_cb_fcport_stats_get, fcport);
3437 break; 3498 break;
3438 3499
3439 case BFI_FCPORT_I2H_STATS_CLEAR_RSP: 3500 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3440 /* 3501 /*
3441 * check for timer pop before processing the rsp 3502 * check for timer pop before processing the rsp
3442 */ 3503 */
3443 if (fcport->stats_busy == BFA_FALSE || 3504 if (list_empty(&fcport->statsclr_pending_q) ||
3444 fcport->stats_status == BFA_STATUS_ETIMER) 3505 (fcport->stats_status == BFA_STATUS_ETIMER))
3445 break; 3506 break;
3446 3507
3447 bfa_timer_stop(&fcport->timer); 3508 bfa_timer_stop(&fcport->timer);
3448 fcport->stats_status = BFA_STATUS_OK; 3509 fcport->stats_status = BFA_STATUS_OK;
3449 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 3510 __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3450 __bfa_cb_fcport_stats_clr, fcport);
3451 break; 3511 break;
3452 3512
3453 case BFI_FCPORT_I2H_ENABLE_AEN: 3513 case BFI_FCPORT_I2H_ENABLE_AEN:
@@ -3779,25 +3839,25 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3779 * Fetch port statistics (FCQoS or FCoE). 3839 * Fetch port statistics (FCQoS or FCoE).
3780 */ 3840 */
3781bfa_status_t 3841bfa_status_t
3782bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, 3842bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3783 bfa_cb_port_t cbfn, void *cbarg)
3784{ 3843{
3785 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3844 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3786 3845
3787 if (fcport->stats_busy) { 3846 if (bfa_ioc_is_disabled(&bfa->ioc))
3788 bfa_trc(bfa, fcport->stats_busy); 3847 return BFA_STATUS_IOC_DISABLED;
3789 return BFA_STATUS_DEVBUSY;
3790 }
3791 3848
3792 fcport->stats_busy = BFA_TRUE; 3849 if (!list_empty(&fcport->statsclr_pending_q))
3793 fcport->stats_ret = stats; 3850 return BFA_STATUS_DEVBUSY;
3794 fcport->stats_cbfn = cbfn;
3795 fcport->stats_cbarg = cbarg;
3796 3851
3797 bfa_fcport_send_stats_get(fcport); 3852 if (list_empty(&fcport->stats_pending_q)) {
3853 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3854 bfa_fcport_send_stats_get(fcport);
3855 bfa_timer_start(bfa, &fcport->timer,
3856 bfa_fcport_stats_get_timeout,
3857 fcport, BFA_FCPORT_STATS_TOV);
3858 } else
3859 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3798 3860
3799 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3800 fcport, BFA_FCPORT_STATS_TOV);
3801 return BFA_STATUS_OK; 3861 return BFA_STATUS_OK;
3802} 3862}
3803 3863
@@ -3805,27 +3865,25 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3805 * Reset port statistics (FCQoS or FCoE). 3865 * Reset port statistics (FCQoS or FCoE).
3806 */ 3866 */
3807bfa_status_t 3867bfa_status_t
3808bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) 3868bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3809{ 3869{
3810 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3870 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3811 3871
3812 if (fcport->stats_busy) { 3872 if (!list_empty(&fcport->stats_pending_q))
3813 bfa_trc(bfa, fcport->stats_busy);
3814 return BFA_STATUS_DEVBUSY; 3873 return BFA_STATUS_DEVBUSY;
3815 }
3816
3817 fcport->stats_busy = BFA_TRUE;
3818 fcport->stats_cbfn = cbfn;
3819 fcport->stats_cbarg = cbarg;
3820 3874
3821 bfa_fcport_send_stats_clear(fcport); 3875 if (list_empty(&fcport->statsclr_pending_q)) {
3876 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3877 bfa_fcport_send_stats_clear(fcport);
3878 bfa_timer_start(bfa, &fcport->timer,
3879 bfa_fcport_stats_clr_timeout,
3880 fcport, BFA_FCPORT_STATS_TOV);
3881 } else
3882 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3822 3883
3823 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3824 fcport, BFA_FCPORT_STATS_TOV);
3825 return BFA_STATUS_OK; 3884 return BFA_STATUS_OK;
3826} 3885}
3827 3886
3828
3829/* 3887/*
3830 * Fetch port attributes. 3888 * Fetch port attributes.
3831 */ 3889 */
@@ -4619,6 +4677,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4619 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); 4677 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4620 rp->fw_handle = msg.create_rsp->fw_handle; 4678 rp->fw_handle = msg.create_rsp->fw_handle;
4621 rp->qos_attr = msg.create_rsp->qos_attr; 4679 rp->qos_attr = msg.create_rsp->qos_attr;
4680 bfa_rport_set_lunmask(bfa, rp);
4622 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); 4681 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4623 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); 4682 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4624 break; 4683 break;
@@ -4626,6 +4685,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4626 case BFI_RPORT_I2H_DELETE_RSP: 4685 case BFI_RPORT_I2H_DELETE_RSP:
4627 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); 4686 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4628 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); 4687 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4688 bfa_rport_unset_lunmask(bfa, rp);
4629 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); 4689 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4630 break; 4690 break;
4631 4691
@@ -4706,6 +4766,37 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4706 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); 4766 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4707} 4767}
4708 4768
4769/* Set Rport LUN Mask */
4770void
4771bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4772{
4773 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
4774 wwn_t lp_wwn, rp_wwn;
4775 u8 lp_tag = (u8)rp->rport_info.lp_tag;
4776
4777 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4778 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4779
4780 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4781 rp->lun_mask = BFA_TRUE;
4782 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
4783}
4784
4785/* Unset Rport LUN mask */
4786void
4787bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4788{
4789 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
4790 wwn_t lp_wwn, rp_wwn;
4791
4792 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4793 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4794
4795 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4796 rp->lun_mask = BFA_FALSE;
4797 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
4798 BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
4799}
4709 4800
4710/* 4801/*
4711 * SGPG related functions 4802 * SGPG related functions
@@ -5517,11 +5608,29 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5517 return BFA_STATUS_PORT_NOT_DISABLED; 5608 return BFA_STATUS_PORT_NOT_DISABLED;
5518 } 5609 }
5519 5610
5520 /* Check if the speed is supported */ 5611 /*
5521 bfa_fcport_get_attr(bfa, &attr); 5612 * Check if input speed is supported by the port mode
5522 bfa_trc(fcdiag, attr.speed_supported); 5613 */
5523 if (speed > attr.speed_supported) 5614 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5524 return BFA_STATUS_UNSUPP_SPEED; 5615 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5616 speed == BFA_PORT_SPEED_2GBPS ||
5617 speed == BFA_PORT_SPEED_4GBPS ||
5618 speed == BFA_PORT_SPEED_8GBPS ||
5619 speed == BFA_PORT_SPEED_16GBPS ||
5620 speed == BFA_PORT_SPEED_AUTO)) {
5621 bfa_trc(fcdiag, speed);
5622 return BFA_STATUS_UNSUPP_SPEED;
5623 }
5624 bfa_fcport_get_attr(bfa, &attr);
5625 bfa_trc(fcdiag, attr.speed_supported);
5626 if (speed > attr.speed_supported)
5627 return BFA_STATUS_UNSUPP_SPEED;
5628 } else {
5629 if (speed != BFA_PORT_SPEED_10GBPS) {
5630 bfa_trc(fcdiag, speed);
5631 return BFA_STATUS_UNSUPP_SPEED;
5632 }
5633 }
5525 5634
5526 /* For Mezz card, port speed entered needs to be checked */ 5635 /* For Mezz card, port speed entered needs to be checked */
5527 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { 5636 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index fbe513a671b5..95adb86d3769 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -297,6 +297,7 @@ struct bfa_rport_s {
297 void *rport_drv; /* fcs/driver rport object */ 297 void *rport_drv; /* fcs/driver rport object */
298 u16 fw_handle; /* firmware rport handle */ 298 u16 fw_handle; /* firmware rport handle */
299 u16 rport_tag; /* BFA rport tag */ 299 u16 rport_tag; /* BFA rport tag */
300 u8 lun_mask; /* LUN mask flag */
300 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */ 301 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
301 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 302 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
302 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ 303 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
@@ -404,6 +405,7 @@ struct bfa_lps_s {
404 u8 bb_scn; /* local BB_SCN */ 405 u8 bb_scn; /* local BB_SCN */
405 u8 lsrjt_rsn; /* LSRJT reason */ 406 u8 lsrjt_rsn; /* LSRJT reason */
406 u8 lsrjt_expl; /* LSRJT explanation */ 407 u8 lsrjt_expl; /* LSRJT explanation */
408 u8 lun_mask; /* LUN mask flag */
407 wwn_t pwwn; /* port wwn of lport */ 409 wwn_t pwwn; /* port wwn of lport */
408 wwn_t nwwn; /* node wwn of lport */ 410 wwn_t nwwn; /* node wwn of lport */
409 wwn_t pr_pwwn; /* port wwn of lport peer */ 411 wwn_t pr_pwwn; /* port wwn of lport peer */
@@ -441,7 +443,6 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
441 */ 443 */
442 444
443#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) 445#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
444typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
445 446
446/* 447/*
447 * Link notification data structure 448 * Link notification data structure
@@ -495,13 +496,11 @@ struct bfa_fcport_s {
495 u8 *stats_kva; 496 u8 *stats_kva;
496 u64 stats_pa; 497 u64 stats_pa;
497 union bfa_fcport_stats_u *stats; 498 union bfa_fcport_stats_u *stats;
498 union bfa_fcport_stats_u *stats_ret; /* driver stats location */
499 bfa_status_t stats_status; /* stats/statsclr status */ 499 bfa_status_t stats_status; /* stats/statsclr status */
500 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */ 500 struct list_head stats_pending_q;
501 struct list_head statsclr_pending_q;
501 bfa_boolean_t stats_qfull; 502 bfa_boolean_t stats_qfull;
502 u32 stats_reset_time; /* stats reset time stamp */ 503 u32 stats_reset_time; /* stats reset time stamp */
503 bfa_cb_port_t stats_cbfn; /* driver callback function */
504 void *stats_cbarg; /* *!< user callback arg */
505 bfa_boolean_t diag_busy; /* diag busy status */ 504 bfa_boolean_t diag_busy; /* diag busy status */
506 bfa_boolean_t beacon; /* port beacon status */ 505 bfa_boolean_t beacon; /* port beacon status */
507 bfa_boolean_t link_e2e_beacon; /* link beacon status */ 506 bfa_boolean_t link_e2e_beacon; /* link beacon status */
@@ -552,10 +551,9 @@ void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
552 bfa_boolean_t link_e2e_beacon); 551 bfa_boolean_t link_e2e_beacon);
553bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); 552bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
554bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, 553bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
555 union bfa_fcport_stats_u *stats, 554 struct bfa_cb_pending_q_s *cb);
556 bfa_cb_port_t cbfn, void *cbarg); 555bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
557bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, 556 struct bfa_cb_pending_q_s *cb);
558 void *cbarg);
559bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); 557bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
560bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); 558bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
561bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); 559bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
@@ -578,6 +576,19 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
578 struct bfa_rport_qos_attr_s new_qos_attr); 576 struct bfa_rport_qos_attr_s new_qos_attr);
579 577
580/* 578/*
579 * Rport LUN masking related
580 */
581#define BFA_RPORT_TAG_INVALID 0xffff
582#define BFA_LP_TAG_INVALID 0xff
583void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
584void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
585bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
586wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
587struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
588 wwn_t *lpwwn, wwn_t rpwwn);
589void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
590
591/*
581 * bfa fcxp API functions 592 * bfa fcxp API functions
582 */ 593 */
583struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa, 594struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index beb30a748ea5..66fb72531b34 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1348,7 +1348,7 @@ int
1348bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 1348bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1349{ 1349{
1350 struct bfad_s *bfad; 1350 struct bfad_s *bfad;
1351 int error = -ENODEV, retval; 1351 int error = -ENODEV, retval, i;
1352 1352
1353 /* For single port cards - only claim function 0 */ 1353 /* For single port cards - only claim function 0 */
1354 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && 1354 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
@@ -1372,6 +1372,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1372 bfa_trc_init(bfad->trcmod); 1372 bfa_trc_init(bfad->trcmod);
1373 bfa_trc(bfad, bfad_inst); 1373 bfa_trc(bfad, bfad_inst);
1374 1374
1375 /* AEN INIT */
1376 INIT_LIST_HEAD(&bfad->free_aen_q);
1377 INIT_LIST_HEAD(&bfad->active_aen_q);
1378 for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
1379 list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
1380
1375 if (!(bfad_load_fwimg(pdev))) { 1381 if (!(bfad_load_fwimg(pdev))) {
1376 kfree(bfad->trcmod); 1382 kfree(bfad->trcmod);
1377 goto out_alloc_trace_failure; 1383 goto out_alloc_trace_failure;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 89f863ed2334..06fc00caeb41 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -56,7 +56,7 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
56 spin_lock_irqsave(&bfad->bfad_lock, flags); 56 spin_lock_irqsave(&bfad->bfad_lock, flags);
57 if (bfad->disable_active) { 57 if (bfad->disable_active) {
58 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 58 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
59 return EBUSY; 59 return -EBUSY;
60 } 60 }
61 61
62 bfad->disable_active = BFA_TRUE; 62 bfad->disable_active = BFA_TRUE;
@@ -90,6 +90,7 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
90 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum); 90 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
91 iocmd->factorynwwn = pattr.factorynwwn; 91 iocmd->factorynwwn = pattr.factorynwwn;
92 iocmd->factorypwwn = pattr.factorypwwn; 92 iocmd->factorypwwn = pattr.factorypwwn;
93 iocmd->bfad_num = bfad->inst_no;
93 im_port = bfad->pport.im_port; 94 im_port = bfad->pport.im_port;
94 iocmd->host = im_port->shost->host_no; 95 iocmd->host = im_port->shost->host_no;
95 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 96 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -178,6 +179,38 @@ out:
178} 179}
179 180
180int 181int
182bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
183{
184 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
185 unsigned long flags;
186
187 if (v_cmd == IOCMD_IOC_RESET_STATS) {
188 bfa_ioc_clear_stats(&bfad->bfa);
189 iocmd->status = BFA_STATUS_OK;
190 } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
191 spin_lock_irqsave(&bfad->bfad_lock, flags);
192 iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
193 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
194 }
195
196 return 0;
197}
198
199int
200bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
201{
202 struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
203
204 if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
205 strcpy(bfad->adapter_name, iocmd->name);
206 else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
207 strcpy(bfad->port_name, iocmd->name);
208
209 iocmd->status = BFA_STATUS_OK;
210 return 0;
211}
212
213int
181bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd) 214bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
182{ 215{
183 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd; 216 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
@@ -306,6 +339,81 @@ out:
306 return 0; 339 return 0;
307} 340}
308 341
342int
343bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
344{
345 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
346 struct bfad_hal_comp fcomp;
347 unsigned long flags;
348
349 init_completion(&fcomp.comp);
350 spin_lock_irqsave(&bfad->bfad_lock, flags);
351 iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
352 bfad_hcb_comp, &fcomp);
353 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
354 if (iocmd->status != BFA_STATUS_OK) {
355 bfa_trc(bfad, iocmd->status);
356 return 0;
357 }
358 wait_for_completion(&fcomp.comp);
359 iocmd->status = fcomp.status;
360 return 0;
361}
362
363int
364bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
365{
366 struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
367 unsigned long flags;
368
369 spin_lock_irqsave(&bfad->bfad_lock, flags);
370 if (v_cmd == IOCMD_PORT_CFG_TOPO)
371 cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
372 else if (v_cmd == IOCMD_PORT_CFG_SPEED)
373 cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
374 else if (v_cmd == IOCMD_PORT_CFG_ALPA)
375 cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
376 else if (v_cmd == IOCMD_PORT_CLR_ALPA)
377 cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
378 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
379
380 return 0;
381}
382
383int
384bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
385{
386 struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
387 (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
388 unsigned long flags;
389
390 spin_lock_irqsave(&bfad->bfad_lock, flags);
391 iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
392 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
393
394 return 0;
395}
396
397int
398bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
399{
400 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
401 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
402 unsigned long flags;
403
404 spin_lock_irqsave(&bfad->bfad_lock, flags);
405 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
406 if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
407 fcport->cfg.bb_scn_state = BFA_TRUE;
408 else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
409 fcport->cfg.bb_scn_state = BFA_FALSE;
410 }
411 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
412
413 iocmd->status = BFA_STATUS_OK;
414 return 0;
415}
416
309static int 417static int
310bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd) 418bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
311{ 419{
@@ -354,6 +462,40 @@ out:
354} 462}
355 463
356int 464int
465bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
466{
467 struct bfa_fcs_lport_s *fcs_port;
468 struct bfa_bsg_reset_stats_s *iocmd =
469 (struct bfa_bsg_reset_stats_s *)cmd;
470 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
471 struct list_head *qe, *qen;
472 struct bfa_itnim_s *itnim;
473 unsigned long flags;
474
475 spin_lock_irqsave(&bfad->bfad_lock, flags);
476 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
477 iocmd->vf_id, iocmd->vpwwn);
478 if (fcs_port == NULL) {
479 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
480 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
481 goto out;
482 }
483
484 bfa_fcs_lport_clear_stats(fcs_port);
485 /* clear IO stats from all active itnims */
486 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
487 itnim = (struct bfa_itnim_s *) qe;
488 if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
489 continue;
490 bfa_itnim_clear_stats(itnim);
491 }
492 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
493 iocmd->status = BFA_STATUS_OK;
494out:
495 return 0;
496}
497
498int
357bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd) 499bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
358{ 500{
359 struct bfa_fcs_lport_s *fcs_port; 501 struct bfa_fcs_lport_s *fcs_port;
@@ -389,7 +531,7 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
389 void *iocmd_bufptr; 531 void *iocmd_bufptr;
390 532
391 if (iocmd->nrports == 0) 533 if (iocmd->nrports == 0)
392 return EINVAL; 534 return -EINVAL;
393 535
394 if (bfad_chk_iocmd_sz(payload_len, 536 if (bfad_chk_iocmd_sz(payload_len,
395 sizeof(struct bfa_bsg_lport_get_rports_s), 537 sizeof(struct bfa_bsg_lport_get_rports_s),
@@ -539,6 +681,152 @@ out:
539 return 0; 681 return 0;
540} 682}
541 683
684int
685bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
686{
687 struct bfa_bsg_rport_reset_stats_s *iocmd =
688 (struct bfa_bsg_rport_reset_stats_s *)cmd;
689 struct bfa_fcs_lport_s *fcs_port;
690 struct bfa_fcs_rport_s *fcs_rport;
691 struct bfa_rport_s *rport;
692 unsigned long flags;
693
694 spin_lock_irqsave(&bfad->bfad_lock, flags);
695 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
696 iocmd->vf_id, iocmd->pwwn);
697 if (fcs_port == NULL) {
698 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
699 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
700 goto out;
701 }
702
703 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
704 if (fcs_rport == NULL) {
705 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
706 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
707 goto out;
708 }
709
710 memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
711 rport = bfa_fcs_rport_get_halrport(fcs_rport);
712 memset(&rport->stats, 0, sizeof(rport->stats));
713 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
714 iocmd->status = BFA_STATUS_OK;
715out:
716 return 0;
717}
718
719int
720bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
721{
722 struct bfa_bsg_rport_set_speed_s *iocmd =
723 (struct bfa_bsg_rport_set_speed_s *)cmd;
724 struct bfa_fcs_lport_s *fcs_port;
725 struct bfa_fcs_rport_s *fcs_rport;
726 unsigned long flags;
727
728 spin_lock_irqsave(&bfad->bfad_lock, flags);
729 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
730 iocmd->vf_id, iocmd->pwwn);
731 if (fcs_port == NULL) {
732 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
733 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
734 goto out;
735 }
736
737 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
738 if (fcs_rport == NULL) {
739 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
740 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
741 goto out;
742 }
743
744 fcs_rport->rpf.assigned_speed = iocmd->speed;
745 /* Set this speed in f/w only if the RPSC speed is not available */
746 if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
747 bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
748 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
749 iocmd->status = BFA_STATUS_OK;
750out:
751 return 0;
752}
753
754int
755bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
756{
757 struct bfa_fcs_vport_s *fcs_vport;
758 struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
759 unsigned long flags;
760
761 spin_lock_irqsave(&bfad->bfad_lock, flags);
762 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
763 iocmd->vf_id, iocmd->vpwwn);
764 if (fcs_vport == NULL) {
765 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
766 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
767 goto out;
768 }
769
770 bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
771 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
772 iocmd->status = BFA_STATUS_OK;
773out:
774 return 0;
775}
776
777int
778bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
779{
780 struct bfa_fcs_vport_s *fcs_vport;
781 struct bfa_bsg_vport_stats_s *iocmd =
782 (struct bfa_bsg_vport_stats_s *)cmd;
783 unsigned long flags;
784
785 spin_lock_irqsave(&bfad->bfad_lock, flags);
786 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
787 iocmd->vf_id, iocmd->vpwwn);
788 if (fcs_vport == NULL) {
789 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
790 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
791 goto out;
792 }
793
794 memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
795 sizeof(struct bfa_vport_stats_s));
796 memcpy((void *)&iocmd->vport_stats.port_stats,
797 (void *)&fcs_vport->lport.stats,
798 sizeof(struct bfa_lport_stats_s));
799 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
800 iocmd->status = BFA_STATUS_OK;
801out:
802 return 0;
803}
804
805int
806bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
807{
808 struct bfa_fcs_vport_s *fcs_vport;
809 struct bfa_bsg_reset_stats_s *iocmd =
810 (struct bfa_bsg_reset_stats_s *)cmd;
811 unsigned long flags;
812
813 spin_lock_irqsave(&bfad->bfad_lock, flags);
814 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
815 iocmd->vf_id, iocmd->vpwwn);
816 if (fcs_vport == NULL) {
817 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
818 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
819 goto out;
820 }
821
822 memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
823 memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
824 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
825 iocmd->status = BFA_STATUS_OK;
826out:
827 return 0;
828}
829
542static int 830static int
543bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd, 831bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
544 unsigned int payload_len) 832 unsigned int payload_len)
@@ -582,6 +870,66 @@ out:
582} 870}
583 871
584int 872int
873bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
874{
875 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
876 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
877 unsigned long flags;
878
879 spin_lock_irqsave(&bfad->bfad_lock, flags);
880
881 if (cmd == IOCMD_RATELIM_ENABLE)
882 fcport->cfg.ratelimit = BFA_TRUE;
883 else if (cmd == IOCMD_RATELIM_DISABLE)
884 fcport->cfg.ratelimit = BFA_FALSE;
885
886 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
887 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
888
889 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
890 iocmd->status = BFA_STATUS_OK;
891
892 return 0;
893}
894
895int
896bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
897{
898 struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
899 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
900 unsigned long flags;
901
902 spin_lock_irqsave(&bfad->bfad_lock, flags);
903
904 /* Auto and speeds greater than the supported speed, are invalid */
905 if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
906 (iocmd->speed > fcport->speed_sup)) {
907 iocmd->status = BFA_STATUS_UNSUPP_SPEED;
908 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
909 return 0;
910 }
911
912 fcport->cfg.trl_def_speed = iocmd->speed;
913 iocmd->status = BFA_STATUS_OK;
914 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
915
916 return 0;
917}
918
919int
920bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
921{
922 struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
923 unsigned long flags;
924
925 spin_lock_irqsave(&bfad->bfad_lock, flags);
926 bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
927 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
928 iocmd->status = BFA_STATUS_OK;
929 return 0;
930}
931
932int
585bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd) 933bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
586{ 934{
587 struct bfa_bsg_fcpim_modstats_s *iocmd = 935 struct bfa_bsg_fcpim_modstats_s *iocmd =
@@ -604,6 +952,28 @@ bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
604} 952}
605 953
606int 954int
955bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
956{
957 struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
958 (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
959 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
960 struct list_head *qe, *qen;
961 struct bfa_itnim_s *itnim;
962 unsigned long flags;
963
964 spin_lock_irqsave(&bfad->bfad_lock, flags);
965 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
966 itnim = (struct bfa_itnim_s *) qe;
967 bfa_itnim_clear_stats(itnim);
968 }
969 memset(&fcpim->del_itn_stats, 0,
970 sizeof(struct bfa_fcpim_del_itn_stats_s));
971 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
972 iocmd->status = BFA_STATUS_OK;
973 return 0;
974}
975
976int
607bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd) 977bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
608{ 978{
609 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd = 979 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
@@ -670,6 +1040,35 @@ bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
670} 1040}
671 1041
672static int 1042static int
1043bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
1044{
1045 struct bfa_bsg_rport_reset_stats_s *iocmd =
1046 (struct bfa_bsg_rport_reset_stats_s *)cmd;
1047 struct bfa_fcs_lport_s *fcs_port;
1048 struct bfa_fcs_itnim_s *itnim;
1049 unsigned long flags;
1050
1051 spin_lock_irqsave(&bfad->bfad_lock, flags);
1052 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1053 iocmd->vf_id, iocmd->pwwn);
1054 if (!fcs_port)
1055 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1056 else {
1057 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1058 if (itnim == NULL)
1059 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1060 else {
1061 iocmd->status = BFA_STATUS_OK;
1062 bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
1063 bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
1064 }
1065 }
1066 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1067
1068 return 0;
1069}
1070
1071static int
673bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd) 1072bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
674{ 1073{
675 struct bfa_bsg_itnim_itnstats_s *iocmd = 1074 struct bfa_bsg_itnim_itnstats_s *iocmd =
@@ -1511,11 +1910,545 @@ out:
1511 return 0; 1910 return 0;
1512} 1911}
1513 1912
1913#define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
1914int
1915bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
1916 unsigned int payload_len)
1917{
1918 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
1919 void *iocmd_bufptr;
1920 unsigned long flags;
1921
1922 if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
1923 BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
1924 iocmd->status = BFA_STATUS_VERSION_FAIL;
1925 return 0;
1926 }
1927
1928 if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
1929 !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
1930 !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
1931 bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
1932 iocmd->status = BFA_STATUS_EINVAL;
1933 goto out;
1934 }
1935
1936 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
1937 spin_lock_irqsave(&bfad->bfad_lock, flags);
1938 iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
1939 (u32 *)&iocmd->offset, &iocmd->bufsz);
1940 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1941out:
1942 return 0;
1943}
1944
1945int
1946bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
1947{
1948 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1949 unsigned long flags;
1950
1951 if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
1952 spin_lock_irqsave(&bfad->bfad_lock, flags);
1953 bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
1954 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1955 } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
1956 bfad->plog_buf.head = bfad->plog_buf.tail = 0;
1957 else if (v_cmd == IOCMD_DEBUG_START_DTRC)
1958 bfa_trc_init(bfad->trcmod);
1959 else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
1960 bfa_trc_stop(bfad->trcmod);
1961
1962 iocmd->status = BFA_STATUS_OK;
1963 return 0;
1964}
1965
1966int
1967bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
1968{
1969 struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
1970
1971 if (iocmd->ctl == BFA_TRUE)
1972 bfad->plog_buf.plog_enabled = 1;
1973 else
1974 bfad->plog_buf.plog_enabled = 0;
1975
1976 iocmd->status = BFA_STATUS_OK;
1977 return 0;
1978}
1979
1980int
1981bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
1982{
1983 struct bfa_bsg_fcpim_profile_s *iocmd =
1984 (struct bfa_bsg_fcpim_profile_s *)cmd;
1985 struct timeval tv;
1986 unsigned long flags;
1987
1988 do_gettimeofday(&tv);
1989 spin_lock_irqsave(&bfad->bfad_lock, flags);
1990 if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
1991 iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
1992 else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
1993 iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
1994 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1995
1996 return 0;
1997}
1998
1999static int
2000bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
2001{
2002 struct bfa_bsg_itnim_ioprofile_s *iocmd =
2003 (struct bfa_bsg_itnim_ioprofile_s *)cmd;
2004 struct bfa_fcs_lport_s *fcs_port;
2005 struct bfa_fcs_itnim_s *itnim;
2006 unsigned long flags;
2007
2008 spin_lock_irqsave(&bfad->bfad_lock, flags);
2009 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
2010 iocmd->vf_id, iocmd->lpwwn);
2011 if (!fcs_port)
2012 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
2013 else {
2014 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
2015 if (itnim == NULL)
2016 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
2017 else
2018 iocmd->status = bfa_itnim_get_ioprofile(
2019 bfa_fcs_itnim_get_halitn(itnim),
2020 &iocmd->ioprofile);
2021 }
2022 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2023 return 0;
2024}
2025
2026int
2027bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
2028{
2029 struct bfa_bsg_fcport_stats_s *iocmd =
2030 (struct bfa_bsg_fcport_stats_s *)cmd;
2031 struct bfad_hal_comp fcomp;
2032 unsigned long flags;
2033 struct bfa_cb_pending_q_s cb_qe;
2034
2035 init_completion(&fcomp.comp);
2036 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2037 &fcomp, &iocmd->stats);
2038 spin_lock_irqsave(&bfad->bfad_lock, flags);
2039 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2040 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2041 if (iocmd->status != BFA_STATUS_OK) {
2042 bfa_trc(bfad, iocmd->status);
2043 goto out;
2044 }
2045 wait_for_completion(&fcomp.comp);
2046 iocmd->status = fcomp.status;
2047out:
2048 return 0;
2049}
2050
2051int
2052bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
2053{
2054 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2055 struct bfad_hal_comp fcomp;
2056 unsigned long flags;
2057 struct bfa_cb_pending_q_s cb_qe;
2058
2059 init_completion(&fcomp.comp);
2060 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
2061
2062 spin_lock_irqsave(&bfad->bfad_lock, flags);
2063 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2064 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2065 if (iocmd->status != BFA_STATUS_OK) {
2066 bfa_trc(bfad, iocmd->status);
2067 goto out;
2068 }
2069 wait_for_completion(&fcomp.comp);
2070 iocmd->status = fcomp.status;
2071out:
2072 return 0;
2073}
2074
2075int
2076bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2077{
2078 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2079 struct bfad_hal_comp fcomp;
2080 unsigned long flags;
2081
2082 init_completion(&fcomp.comp);
2083 spin_lock_irqsave(&bfad->bfad_lock, flags);
2084 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2085 BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
2086 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2087 bfad_hcb_comp, &fcomp);
2088 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2089 if (iocmd->status != BFA_STATUS_OK)
2090 goto out;
2091 wait_for_completion(&fcomp.comp);
2092 iocmd->status = fcomp.status;
2093out:
2094 return 0;
2095}
2096
2097int
2098bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2099{
2100 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2101 struct bfad_hal_comp fcomp;
2102 unsigned long flags;
2103
2104 init_completion(&fcomp.comp);
2105 spin_lock_irqsave(&bfad->bfad_lock, flags);
2106 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2107 BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
2108 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2109 bfad_hcb_comp, &fcomp);
2110 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2111 if (iocmd->status != BFA_STATUS_OK)
2112 goto out;
2113 wait_for_completion(&fcomp.comp);
2114 iocmd->status = fcomp.status;
2115out:
2116 return 0;
2117}
2118
2119int
2120bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
2121{
2122 struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
2123 struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
2124 struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
2125 unsigned long flags;
2126
2127 spin_lock_irqsave(&bfad->bfad_lock, flags);
2128 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
2129 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
2130 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
2131 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
2132 iocmd->status = BFA_STATUS_OK;
2133 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2134
2135 return 0;
2136}
2137
2138int
2139bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
2140{
2141 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2142 struct bfad_hal_comp fcomp;
2143 unsigned long flags;
2144
2145 init_completion(&fcomp.comp);
2146 spin_lock_irqsave(&bfad->bfad_lock, flags);
2147 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2148 BFA_FLASH_PART_PXECFG,
2149 bfad->bfa.ioc.port_id, &iocmd->cfg,
2150 sizeof(struct bfa_ethboot_cfg_s), 0,
2151 bfad_hcb_comp, &fcomp);
2152 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2153 if (iocmd->status != BFA_STATUS_OK)
2154 goto out;
2155 wait_for_completion(&fcomp.comp);
2156 iocmd->status = fcomp.status;
2157out:
2158 return 0;
2159}
2160
2161int
2162bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
2163{
2164 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2165 struct bfad_hal_comp fcomp;
2166 unsigned long flags;
2167
2168 init_completion(&fcomp.comp);
2169 spin_lock_irqsave(&bfad->bfad_lock, flags);
2170 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2171 BFA_FLASH_PART_PXECFG,
2172 bfad->bfa.ioc.port_id, &iocmd->cfg,
2173 sizeof(struct bfa_ethboot_cfg_s), 0,
2174 bfad_hcb_comp, &fcomp);
2175 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2176 if (iocmd->status != BFA_STATUS_OK)
2177 goto out;
2178 wait_for_completion(&fcomp.comp);
2179 iocmd->status = fcomp.status;
2180out:
2181 return 0;
2182}
2183
2184int
2185bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2186{
2187 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2188 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2189 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2190 unsigned long flags;
2191
2192 spin_lock_irqsave(&bfad->bfad_lock, flags);
2193
2194 if (v_cmd == IOCMD_TRUNK_ENABLE) {
2195 trunk->attr.state = BFA_TRUNK_OFFLINE;
2196 bfa_fcport_disable(&bfad->bfa);
2197 fcport->cfg.trunked = BFA_TRUE;
2198 } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2199 trunk->attr.state = BFA_TRUNK_DISABLED;
2200 bfa_fcport_disable(&bfad->bfa);
2201 fcport->cfg.trunked = BFA_FALSE;
2202 }
2203
2204 if (!bfa_fcport_is_disabled(&bfad->bfa))
2205 bfa_fcport_enable(&bfad->bfa);
2206
2207 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2208
2209 iocmd->status = BFA_STATUS_OK;
2210 return 0;
2211}
2212
2213int
2214bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2215{
2216 struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2217 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2218 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2219 unsigned long flags;
2220
2221 spin_lock_irqsave(&bfad->bfad_lock, flags);
2222 memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2223 sizeof(struct bfa_trunk_attr_s));
2224 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2225 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2226
2227 iocmd->status = BFA_STATUS_OK;
2228 return 0;
2229}
2230
2231int
2232bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2233{
2234 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2235 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2236 unsigned long flags;
2237
2238 spin_lock_irqsave(&bfad->bfad_lock, flags);
2239 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2240 if (v_cmd == IOCMD_QOS_ENABLE)
2241 fcport->cfg.qos_enabled = BFA_TRUE;
2242 else if (v_cmd == IOCMD_QOS_DISABLE)
2243 fcport->cfg.qos_enabled = BFA_FALSE;
2244 }
2245 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2246
2247 iocmd->status = BFA_STATUS_OK;
2248 return 0;
2249}
2250
2251int
2252bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2253{
2254 struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
2255 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2256 unsigned long flags;
2257
2258 spin_lock_irqsave(&bfad->bfad_lock, flags);
2259 iocmd->attr.state = fcport->qos_attr.state;
2260 iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
2261 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2262
2263 iocmd->status = BFA_STATUS_OK;
2264 return 0;
2265}
2266
2267int
2268bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
2269{
2270 struct bfa_bsg_qos_vc_attr_s *iocmd =
2271 (struct bfa_bsg_qos_vc_attr_s *)cmd;
2272 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2273 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
2274 unsigned long flags;
2275 u32 i = 0;
2276
2277 spin_lock_irqsave(&bfad->bfad_lock, flags);
2278 iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
2279 iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
2280 iocmd->attr.elp_opmode_flags =
2281 be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
2282
2283 /* Individual VC info */
2284 while (i < iocmd->attr.total_vc_count) {
2285 iocmd->attr.vc_info[i].vc_credit =
2286 bfa_vc_attr->vc_info[i].vc_credit;
2287 iocmd->attr.vc_info[i].borrow_credit =
2288 bfa_vc_attr->vc_info[i].borrow_credit;
2289 iocmd->attr.vc_info[i].priority =
2290 bfa_vc_attr->vc_info[i].priority;
2291 i++;
2292 }
2293 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2294
2295 iocmd->status = BFA_STATUS_OK;
2296 return 0;
2297}
2298
2299int
2300bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2301{
2302 struct bfa_bsg_fcport_stats_s *iocmd =
2303 (struct bfa_bsg_fcport_stats_s *)cmd;
2304 struct bfad_hal_comp fcomp;
2305 unsigned long flags;
2306 struct bfa_cb_pending_q_s cb_qe;
2307
2308 init_completion(&fcomp.comp);
2309 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2310 &fcomp, &iocmd->stats);
2311
2312 spin_lock_irqsave(&bfad->bfad_lock, flags);
2313 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2314 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2315 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2316 if (iocmd->status != BFA_STATUS_OK) {
2317 bfa_trc(bfad, iocmd->status);
2318 goto out;
2319 }
2320 wait_for_completion(&fcomp.comp);
2321 iocmd->status = fcomp.status;
2322out:
2323 return 0;
2324}
2325
2326int
2327bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2328{
2329 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2330 struct bfad_hal_comp fcomp;
2331 unsigned long flags;
2332 struct bfa_cb_pending_q_s cb_qe;
2333
2334 init_completion(&fcomp.comp);
2335 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2336 &fcomp, NULL);
2337
2338 spin_lock_irqsave(&bfad->bfad_lock, flags);
2339 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2340 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2341 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2342 if (iocmd->status != BFA_STATUS_OK) {
2343 bfa_trc(bfad, iocmd->status);
2344 goto out;
2345 }
2346 wait_for_completion(&fcomp.comp);
2347 iocmd->status = fcomp.status;
2348out:
2349 return 0;
2350}
2351
2352int
2353bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
2354{
2355 struct bfa_bsg_vf_stats_s *iocmd =
2356 (struct bfa_bsg_vf_stats_s *)cmd;
2357 struct bfa_fcs_fabric_s *fcs_vf;
2358 unsigned long flags;
2359
2360 spin_lock_irqsave(&bfad->bfad_lock, flags);
2361 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2362 if (fcs_vf == NULL) {
2363 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2364 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2365 goto out;
2366 }
2367 memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
2368 sizeof(struct bfa_vf_stats_s));
2369 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2370 iocmd->status = BFA_STATUS_OK;
2371out:
2372 return 0;
2373}
2374
2375int
2376bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
2377{
2378 struct bfa_bsg_vf_reset_stats_s *iocmd =
2379 (struct bfa_bsg_vf_reset_stats_s *)cmd;
2380 struct bfa_fcs_fabric_s *fcs_vf;
2381 unsigned long flags;
2382
2383 spin_lock_irqsave(&bfad->bfad_lock, flags);
2384 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2385 if (fcs_vf == NULL) {
2386 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2387 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2388 goto out;
2389 }
2390 memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
2391 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2392 iocmd->status = BFA_STATUS_OK;
2393out:
2394 return 0;
2395}
2396
2397int
2398bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2399{
2400 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2401 unsigned long flags;
2402
2403 spin_lock_irqsave(&bfad->bfad_lock, flags);
2404 if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
2405 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2406 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
2407 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2408 else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2409 iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2410 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2411 return 0;
2412}
2413
2414int
2415bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2416{
2417 struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2418 (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2419 struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2420 unsigned long flags;
2421
2422 spin_lock_irqsave(&bfad->bfad_lock, flags);
2423 iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2424 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2425 return 0;
2426}
2427
2428int
2429bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2430{
2431 struct bfa_bsg_fcpim_lunmask_s *iocmd =
2432 (struct bfa_bsg_fcpim_lunmask_s *)cmd;
2433 unsigned long flags;
2434
2435 spin_lock_irqsave(&bfad->bfad_lock, flags);
2436 if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2437 iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2438 &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2439 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2440 iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2441 iocmd->vf_id, &iocmd->pwwn,
2442 iocmd->rpwwn, iocmd->lun);
2443 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2444 return 0;
2445}
2446
1514static int 2447static int
1515bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, 2448bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1516 unsigned int payload_len) 2449 unsigned int payload_len)
1517{ 2450{
1518 int rc = EINVAL; 2451 int rc = -EINVAL;
1519 2452
1520 switch (cmd) { 2453 switch (cmd) {
1521 case IOCMD_IOC_ENABLE: 2454 case IOCMD_IOC_ENABLE:
@@ -1536,6 +2469,14 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1536 case IOCMD_IOC_GET_FWSTATS: 2469 case IOCMD_IOC_GET_FWSTATS:
1537 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len); 2470 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
1538 break; 2471 break;
2472 case IOCMD_IOC_RESET_STATS:
2473 case IOCMD_IOC_RESET_FWSTATS:
2474 rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2475 break;
2476 case IOCMD_IOC_SET_ADAPTER_NAME:
2477 case IOCMD_IOC_SET_PORT_NAME:
2478 rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2479 break;
1539 case IOCMD_IOCFC_GET_ATTR: 2480 case IOCMD_IOCFC_GET_ATTR:
1540 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd); 2481 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
1541 break; 2482 break;
@@ -1554,12 +2495,31 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1554 case IOCMD_PORT_GET_STATS: 2495 case IOCMD_PORT_GET_STATS:
1555 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len); 2496 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
1556 break; 2497 break;
2498 case IOCMD_PORT_RESET_STATS:
2499 rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2500 break;
2501 case IOCMD_PORT_CFG_TOPO:
2502 case IOCMD_PORT_CFG_SPEED:
2503 case IOCMD_PORT_CFG_ALPA:
2504 case IOCMD_PORT_CLR_ALPA:
2505 rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2506 break;
2507 case IOCMD_PORT_CFG_MAXFRSZ:
2508 rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2509 break;
2510 case IOCMD_PORT_BBSC_ENABLE:
2511 case IOCMD_PORT_BBSC_DISABLE:
2512 rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
2513 break;
1557 case IOCMD_LPORT_GET_ATTR: 2514 case IOCMD_LPORT_GET_ATTR:
1558 rc = bfad_iocmd_lport_get_attr(bfad, iocmd); 2515 rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
1559 break; 2516 break;
1560 case IOCMD_LPORT_GET_STATS: 2517 case IOCMD_LPORT_GET_STATS:
1561 rc = bfad_iocmd_lport_get_stats(bfad, iocmd); 2518 rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
1562 break; 2519 break;
2520 case IOCMD_LPORT_RESET_STATS:
2521 rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2522 break;
1563 case IOCMD_LPORT_GET_IOSTATS: 2523 case IOCMD_LPORT_GET_IOSTATS:
1564 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd); 2524 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
1565 break; 2525 break;
@@ -1575,12 +2535,40 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1575 case IOCMD_RPORT_GET_STATS: 2535 case IOCMD_RPORT_GET_STATS:
1576 rc = bfad_iocmd_rport_get_stats(bfad, iocmd); 2536 rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
1577 break; 2537 break;
2538 case IOCMD_RPORT_RESET_STATS:
2539 rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2540 break;
2541 case IOCMD_RPORT_SET_SPEED:
2542 rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2543 break;
2544 case IOCMD_VPORT_GET_ATTR:
2545 rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2546 break;
2547 case IOCMD_VPORT_GET_STATS:
2548 rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2549 break;
2550 case IOCMD_VPORT_RESET_STATS:
2551 rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2552 break;
1578 case IOCMD_FABRIC_GET_LPORTS: 2553 case IOCMD_FABRIC_GET_LPORTS:
1579 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len); 2554 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
1580 break; 2555 break;
2556 case IOCMD_RATELIM_ENABLE:
2557 case IOCMD_RATELIM_DISABLE:
2558 rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2559 break;
2560 case IOCMD_RATELIM_DEF_SPEED:
2561 rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2562 break;
2563 case IOCMD_FCPIM_FAILOVER:
2564 rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2565 break;
1581 case IOCMD_FCPIM_MODSTATS: 2566 case IOCMD_FCPIM_MODSTATS:
1582 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd); 2567 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
1583 break; 2568 break;
2569 case IOCMD_FCPIM_MODSTATSCLR:
2570 rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2571 break;
1584 case IOCMD_FCPIM_DEL_ITN_STATS: 2572 case IOCMD_FCPIM_DEL_ITN_STATS:
1585 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd); 2573 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
1586 break; 2574 break;
@@ -1590,6 +2578,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1590 case IOCMD_ITNIM_GET_IOSTATS: 2578 case IOCMD_ITNIM_GET_IOSTATS:
1591 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd); 2579 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
1592 break; 2580 break;
2581 case IOCMD_ITNIM_RESET_STATS:
2582 rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2583 break;
1593 case IOCMD_ITNIM_GET_ITNSTATS: 2584 case IOCMD_ITNIM_GET_ITNSTATS:
1594 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd); 2585 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
1595 break; 2586 break;
@@ -1702,11 +2693,92 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1702 case IOCMD_DEBUG_PORTLOG: 2693 case IOCMD_DEBUG_PORTLOG:
1703 rc = bfad_iocmd_porglog_get(bfad, iocmd); 2694 rc = bfad_iocmd_porglog_get(bfad, iocmd);
1704 break; 2695 break;
2696 case IOCMD_DEBUG_FW_CORE:
2697 rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
2698 break;
2699 case IOCMD_DEBUG_FW_STATE_CLR:
2700 case IOCMD_DEBUG_PORTLOG_CLR:
2701 case IOCMD_DEBUG_START_DTRC:
2702 case IOCMD_DEBUG_STOP_DTRC:
2703 rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
2704 break;
2705 case IOCMD_DEBUG_PORTLOG_CTL:
2706 rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
2707 break;
2708 case IOCMD_FCPIM_PROFILE_ON:
2709 case IOCMD_FCPIM_PROFILE_OFF:
2710 rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
2711 break;
2712 case IOCMD_ITNIM_GET_IOPROFILE:
2713 rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
2714 break;
2715 case IOCMD_FCPORT_GET_STATS:
2716 rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
2717 break;
2718 case IOCMD_FCPORT_RESET_STATS:
2719 rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
2720 break;
2721 case IOCMD_BOOT_CFG:
2722 rc = bfad_iocmd_boot_cfg(bfad, iocmd);
2723 break;
2724 case IOCMD_BOOT_QUERY:
2725 rc = bfad_iocmd_boot_query(bfad, iocmd);
2726 break;
2727 case IOCMD_PREBOOT_QUERY:
2728 rc = bfad_iocmd_preboot_query(bfad, iocmd);
2729 break;
2730 case IOCMD_ETHBOOT_CFG:
2731 rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
2732 break;
2733 case IOCMD_ETHBOOT_QUERY:
2734 rc = bfad_iocmd_ethboot_query(bfad, iocmd);
2735 break;
2736 case IOCMD_TRUNK_ENABLE:
2737 case IOCMD_TRUNK_DISABLE:
2738 rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
2739 break;
2740 case IOCMD_TRUNK_GET_ATTR:
2741 rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
2742 break;
2743 case IOCMD_QOS_ENABLE:
2744 case IOCMD_QOS_DISABLE:
2745 rc = bfad_iocmd_qos(bfad, iocmd, cmd);
2746 break;
2747 case IOCMD_QOS_GET_ATTR:
2748 rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
2749 break;
2750 case IOCMD_QOS_GET_VC_ATTR:
2751 rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
2752 break;
2753 case IOCMD_QOS_GET_STATS:
2754 rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
2755 break;
2756 case IOCMD_QOS_RESET_STATS:
2757 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
2758 break;
2759 case IOCMD_VF_GET_STATS:
2760 rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
2761 break;
2762 case IOCMD_VF_RESET_STATS:
2763 rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
2764 break;
2765 case IOCMD_FCPIM_LUNMASK_ENABLE:
2766 case IOCMD_FCPIM_LUNMASK_DISABLE:
2767 case IOCMD_FCPIM_LUNMASK_CLEAR:
2768 rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
2769 break;
2770 case IOCMD_FCPIM_LUNMASK_QUERY:
2771 rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
2772 break;
2773 case IOCMD_FCPIM_LUNMASK_ADD:
2774 case IOCMD_FCPIM_LUNMASK_DELETE:
2775 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
2776 break;
1705 default: 2777 default:
1706 rc = EINVAL; 2778 rc = -EINVAL;
1707 break; 2779 break;
1708 } 2780 }
1709 return -rc; 2781 return rc;
1710} 2782}
1711 2783
1712static int 2784static int
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 99b0e8a70c89..e859adb9aa9e 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -30,24 +30,48 @@ enum {
30 IOCMD_IOC_GET_INFO, 30 IOCMD_IOC_GET_INFO,
31 IOCMD_IOC_GET_STATS, 31 IOCMD_IOC_GET_STATS,
32 IOCMD_IOC_GET_FWSTATS, 32 IOCMD_IOC_GET_FWSTATS,
33 IOCMD_IOC_RESET_STATS,
34 IOCMD_IOC_RESET_FWSTATS,
35 IOCMD_IOC_SET_ADAPTER_NAME,
36 IOCMD_IOC_SET_PORT_NAME,
33 IOCMD_IOCFC_GET_ATTR, 37 IOCMD_IOCFC_GET_ATTR,
34 IOCMD_IOCFC_SET_INTR, 38 IOCMD_IOCFC_SET_INTR,
35 IOCMD_PORT_ENABLE, 39 IOCMD_PORT_ENABLE,
36 IOCMD_PORT_DISABLE, 40 IOCMD_PORT_DISABLE,
37 IOCMD_PORT_GET_ATTR, 41 IOCMD_PORT_GET_ATTR,
38 IOCMD_PORT_GET_STATS, 42 IOCMD_PORT_GET_STATS,
43 IOCMD_PORT_RESET_STATS,
44 IOCMD_PORT_CFG_TOPO,
45 IOCMD_PORT_CFG_SPEED,
46 IOCMD_PORT_CFG_ALPA,
47 IOCMD_PORT_CFG_MAXFRSZ,
48 IOCMD_PORT_CLR_ALPA,
49 IOCMD_PORT_BBSC_ENABLE,
50 IOCMD_PORT_BBSC_DISABLE,
39 IOCMD_LPORT_GET_ATTR, 51 IOCMD_LPORT_GET_ATTR,
40 IOCMD_LPORT_GET_RPORTS, 52 IOCMD_LPORT_GET_RPORTS,
41 IOCMD_LPORT_GET_STATS, 53 IOCMD_LPORT_GET_STATS,
54 IOCMD_LPORT_RESET_STATS,
42 IOCMD_LPORT_GET_IOSTATS, 55 IOCMD_LPORT_GET_IOSTATS,
43 IOCMD_RPORT_GET_ATTR, 56 IOCMD_RPORT_GET_ATTR,
44 IOCMD_RPORT_GET_ADDR, 57 IOCMD_RPORT_GET_ADDR,
45 IOCMD_RPORT_GET_STATS, 58 IOCMD_RPORT_GET_STATS,
59 IOCMD_RPORT_RESET_STATS,
60 IOCMD_RPORT_SET_SPEED,
61 IOCMD_VPORT_GET_ATTR,
62 IOCMD_VPORT_GET_STATS,
63 IOCMD_VPORT_RESET_STATS,
46 IOCMD_FABRIC_GET_LPORTS, 64 IOCMD_FABRIC_GET_LPORTS,
65 IOCMD_RATELIM_ENABLE,
66 IOCMD_RATELIM_DISABLE,
67 IOCMD_RATELIM_DEF_SPEED,
68 IOCMD_FCPIM_FAILOVER,
47 IOCMD_FCPIM_MODSTATS, 69 IOCMD_FCPIM_MODSTATS,
70 IOCMD_FCPIM_MODSTATSCLR,
48 IOCMD_FCPIM_DEL_ITN_STATS, 71 IOCMD_FCPIM_DEL_ITN_STATS,
49 IOCMD_ITNIM_GET_ATTR, 72 IOCMD_ITNIM_GET_ATTR,
50 IOCMD_ITNIM_GET_IOSTATS, 73 IOCMD_ITNIM_GET_IOSTATS,
74 IOCMD_ITNIM_RESET_STATS,
51 IOCMD_ITNIM_GET_ITNSTATS, 75 IOCMD_ITNIM_GET_ITNSTATS,
52 IOCMD_IOC_PCIFN_CFG, 76 IOCMD_IOC_PCIFN_CFG,
53 IOCMD_FCPORT_ENABLE, 77 IOCMD_FCPORT_ENABLE,
@@ -86,6 +110,39 @@ enum {
86 IOCMD_PHY_READ_FW, 110 IOCMD_PHY_READ_FW,
87 IOCMD_VHBA_QUERY, 111 IOCMD_VHBA_QUERY,
88 IOCMD_DEBUG_PORTLOG, 112 IOCMD_DEBUG_PORTLOG,
113 IOCMD_DEBUG_FW_CORE,
114 IOCMD_DEBUG_FW_STATE_CLR,
115 IOCMD_DEBUG_PORTLOG_CLR,
116 IOCMD_DEBUG_START_DTRC,
117 IOCMD_DEBUG_STOP_DTRC,
118 IOCMD_DEBUG_PORTLOG_CTL,
119 IOCMD_FCPIM_PROFILE_ON,
120 IOCMD_FCPIM_PROFILE_OFF,
121 IOCMD_ITNIM_GET_IOPROFILE,
122 IOCMD_FCPORT_GET_STATS,
123 IOCMD_FCPORT_RESET_STATS,
124 IOCMD_BOOT_CFG,
125 IOCMD_BOOT_QUERY,
126 IOCMD_PREBOOT_QUERY,
127 IOCMD_ETHBOOT_CFG,
128 IOCMD_ETHBOOT_QUERY,
129 IOCMD_TRUNK_ENABLE,
130 IOCMD_TRUNK_DISABLE,
131 IOCMD_TRUNK_GET_ATTR,
132 IOCMD_QOS_ENABLE,
133 IOCMD_QOS_DISABLE,
134 IOCMD_QOS_GET_ATTR,
135 IOCMD_QOS_GET_VC_ATTR,
136 IOCMD_QOS_GET_STATS,
137 IOCMD_QOS_RESET_STATS,
138 IOCMD_VF_GET_STATS,
139 IOCMD_VF_RESET_STATS,
140 IOCMD_FCPIM_LUNMASK_ENABLE,
141 IOCMD_FCPIM_LUNMASK_DISABLE,
142 IOCMD_FCPIM_LUNMASK_CLEAR,
143 IOCMD_FCPIM_LUNMASK_QUERY,
144 IOCMD_FCPIM_LUNMASK_ADD,
145 IOCMD_FCPIM_LUNMASK_DELETE,
89}; 146};
90 147
91struct bfa_bsg_gen_s { 148struct bfa_bsg_gen_s {
@@ -94,6 +151,43 @@ struct bfa_bsg_gen_s {
94 u16 rsvd; 151 u16 rsvd;
95}; 152};
96 153
154struct bfa_bsg_portlogctl_s {
155 bfa_status_t status;
156 u16 bfad_num;
157 u16 rsvd;
158 bfa_boolean_t ctl;
159 int inst_no;
160};
161
162struct bfa_bsg_fcpim_profile_s {
163 bfa_status_t status;
164 u16 bfad_num;
165 u16 rsvd;
166};
167
168struct bfa_bsg_itnim_ioprofile_s {
169 bfa_status_t status;
170 u16 bfad_num;
171 u16 vf_id;
172 wwn_t lpwwn;
173 wwn_t rpwwn;
174 struct bfa_itnim_ioprofile_s ioprofile;
175};
176
177struct bfa_bsg_fcport_stats_s {
178 bfa_status_t status;
179 u16 bfad_num;
180 u16 rsvd;
181 union bfa_fcport_stats_u stats;
182};
183
184struct bfa_bsg_ioc_name_s {
185 bfa_status_t status;
186 u16 bfad_num;
187 u16 rsvd;
188 char name[BFA_ADAPTER_SYM_NAME_LEN];
189};
190
97struct bfa_bsg_ioc_info_s { 191struct bfa_bsg_ioc_info_s {
98 bfa_status_t status; 192 bfa_status_t status;
99 u16 bfad_num; 193 u16 bfad_num;
@@ -164,6 +258,20 @@ struct bfa_bsg_port_attr_s {
164 struct bfa_port_attr_s attr; 258 struct bfa_port_attr_s attr;
165}; 259};
166 260
261struct bfa_bsg_port_cfg_s {
262 bfa_status_t status;
263 u16 bfad_num;
264 u16 rsvd;
265 u32 param;
266 u32 rsvd1;
267};
268
269struct bfa_bsg_port_cfg_maxfrsize_s {
270 bfa_status_t status;
271 u16 bfad_num;
272 u16 maxfrsize;
273};
274
167struct bfa_bsg_port_stats_s { 275struct bfa_bsg_port_stats_s {
168 bfa_status_t status; 276 bfa_status_t status;
169 u16 bfad_num; 277 u16 bfad_num;
@@ -237,6 +345,47 @@ struct bfa_bsg_rport_scsi_addr_s {
237 u32 lun; 345 u32 lun;
238}; 346};
239 347
348struct bfa_bsg_rport_reset_stats_s {
349 bfa_status_t status;
350 u16 bfad_num;
351 u16 vf_id;
352 wwn_t pwwn;
353 wwn_t rpwwn;
354};
355
356struct bfa_bsg_rport_set_speed_s {
357 bfa_status_t status;
358 u16 bfad_num;
359 u16 vf_id;
360 enum bfa_port_speed speed;
361 u32 rsvd;
362 wwn_t pwwn;
363 wwn_t rpwwn;
364};
365
366struct bfa_bsg_vport_attr_s {
367 bfa_status_t status;
368 u16 bfad_num;
369 u16 vf_id;
370 wwn_t vpwwn;
371 struct bfa_vport_attr_s vport_attr;
372};
373
374struct bfa_bsg_vport_stats_s {
375 bfa_status_t status;
376 u16 bfad_num;
377 u16 vf_id;
378 wwn_t vpwwn;
379 struct bfa_vport_stats_s vport_stats;
380};
381
382struct bfa_bsg_reset_stats_s {
383 bfa_status_t status;
384 u16 bfad_num;
385 u16 vf_id;
386 wwn_t vpwwn;
387};
388
240struct bfa_bsg_fabric_get_lports_s { 389struct bfa_bsg_fabric_get_lports_s {
241 bfa_status_t status; 390 bfa_status_t status;
242 u16 bfad_num; 391 u16 bfad_num;
@@ -246,6 +395,19 @@ struct bfa_bsg_fabric_get_lports_s {
246 u32 rsvd; 395 u32 rsvd;
247}; 396};
248 397
398struct bfa_bsg_trl_speed_s {
399 bfa_status_t status;
400 u16 bfad_num;
401 u16 rsvd;
402 enum bfa_port_speed speed;
403};
404
405struct bfa_bsg_fcpim_s {
406 bfa_status_t status;
407 u16 bfad_num;
408 u16 param;
409};
410
249struct bfa_bsg_fcpim_modstats_s { 411struct bfa_bsg_fcpim_modstats_s {
250 bfa_status_t status; 412 bfa_status_t status;
251 u16 bfad_num; 413 u16 bfad_num;
@@ -258,6 +420,11 @@ struct bfa_bsg_fcpim_del_itn_stats_s {
258 struct bfa_fcpim_del_itn_stats_s modstats; 420 struct bfa_fcpim_del_itn_stats_s modstats;
259}; 421};
260 422
423struct bfa_bsg_fcpim_modstatsclr_s {
424 bfa_status_t status;
425 u16 bfad_num;
426};
427
261struct bfa_bsg_itnim_attr_s { 428struct bfa_bsg_itnim_attr_s {
262 bfa_status_t status; 429 bfa_status_t status;
263 u16 bfad_num; 430 u16 bfad_num;
@@ -485,6 +652,76 @@ struct bfa_bsg_vhba_attr_s {
485 struct bfa_vhba_attr_s attr; 652 struct bfa_vhba_attr_s attr;
486}; 653};
487 654
655struct bfa_bsg_boot_s {
656 bfa_status_t status;
657 u16 bfad_num;
658 u16 rsvd;
659 struct bfa_boot_cfg_s cfg;
660};
661
662struct bfa_bsg_preboot_s {
663 bfa_status_t status;
664 u16 bfad_num;
665 u16 rsvd;
666 struct bfa_boot_pbc_s cfg;
667};
668
669struct bfa_bsg_ethboot_s {
670 bfa_status_t status;
671 u16 bfad_num;
672 u16 rsvd;
673 struct bfa_ethboot_cfg_s cfg;
674};
675
676struct bfa_bsg_trunk_attr_s {
677 bfa_status_t status;
678 u16 bfad_num;
679 u16 rsvd;
680 struct bfa_trunk_attr_s attr;
681};
682
683struct bfa_bsg_qos_attr_s {
684 bfa_status_t status;
685 u16 bfad_num;
686 u16 rsvd;
687 struct bfa_qos_attr_s attr;
688};
689
690struct bfa_bsg_qos_vc_attr_s {
691 bfa_status_t status;
692 u16 bfad_num;
693 u16 rsvd;
694 struct bfa_qos_vc_attr_s attr;
695};
696
697struct bfa_bsg_vf_stats_s {
698 bfa_status_t status;
699 u16 bfad_num;
700 u16 vf_id;
701 struct bfa_vf_stats_s stats;
702};
703
704struct bfa_bsg_vf_reset_stats_s {
705 bfa_status_t status;
706 u16 bfad_num;
707 u16 vf_id;
708};
709
710struct bfa_bsg_fcpim_lunmask_query_s {
711 bfa_status_t status;
712 u16 bfad_num;
713 struct bfa_lunmask_cfg_s lun_mask;
714};
715
716struct bfa_bsg_fcpim_lunmask_s {
717 bfa_status_t status;
718 u16 bfad_num;
719 u16 vf_id;
720 wwn_t pwwn;
721 wwn_t rpwwn;
722 struct scsi_lun lun;
723};
724
488struct bfa_bsg_fcpt_s { 725struct bfa_bsg_fcpt_s {
489 bfa_status_t status; 726 bfa_status_t status;
490 u16 vf_id; 727 u16 vf_id;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 48661a2726d7..bda999ad9f52 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -56,7 +56,7 @@
56#ifdef BFA_DRIVER_VERSION 56#ifdef BFA_DRIVER_VERSION
57#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 57#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
58#else 58#else
59#define BFAD_DRIVER_VERSION "3.0.2.1" 59#define BFAD_DRIVER_VERSION "3.0.2.2"
60#endif 60#endif
61 61
62#define BFAD_PROTO_NAME FCPI_NAME 62#define BFAD_PROTO_NAME FCPI_NAME
@@ -224,6 +224,10 @@ struct bfad_s {
224 char *regdata; 224 char *regdata;
225 u32 reglen; 225 u32 reglen;
226 struct dentry *bfad_dentry_files[5]; 226 struct dentry *bfad_dentry_files[5];
227 struct list_head free_aen_q;
228 struct list_head active_aen_q;
229 struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
230 spinlock_t bfad_aen_spinlock;
227}; 231};
228 232
229/* BFAD state machine events */ 233/* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index f2bf81265ae5..01312381639f 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -656,6 +656,31 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
656 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 656 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
657} 657}
658 658
659static void bfad_aen_im_notify_handler(struct work_struct *work)
660{
661 struct bfad_im_s *im =
662 container_of(work, struct bfad_im_s, aen_im_notify_work);
663 struct bfa_aen_entry_s *aen_entry;
664 struct bfad_s *bfad = im->bfad;
665 struct Scsi_Host *shost = bfad->pport.im_port->shost;
666 void *event_data;
667 unsigned long flags;
668
669 while (!list_empty(&bfad->active_aen_q)) {
670 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
671 bfa_q_deq(&bfad->active_aen_q, &aen_entry);
672 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
673 event_data = (char *)aen_entry + sizeof(struct list_head);
674 fc_host_post_vendor_event(shost, fc_get_event_number(),
675 sizeof(struct bfa_aen_entry_s) -
676 sizeof(struct list_head),
677 (char *)event_data, BFAD_NL_VENDOR_ID);
678 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
679 list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
680 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
681 }
682}
683
659bfa_status_t 684bfa_status_t
660bfad_im_probe(struct bfad_s *bfad) 685bfad_im_probe(struct bfad_s *bfad)
661{ 686{
@@ -676,6 +701,7 @@ bfad_im_probe(struct bfad_s *bfad)
676 rc = BFA_STATUS_FAILED; 701 rc = BFA_STATUS_FAILED;
677 } 702 }
678 703
704 INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
679ext: 705ext:
680 return rc; 706 return rc;
681} 707}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 4fe34d576b05..004b6cf848d9 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -115,8 +115,30 @@ struct bfad_im_s {
115 struct bfad_s *bfad; 115 struct bfad_s *bfad;
116 struct workqueue_struct *drv_workq; 116 struct workqueue_struct *drv_workq;
117 char drv_workq_name[KOBJ_NAME_LEN]; 117 char drv_workq_name[KOBJ_NAME_LEN];
118 struct work_struct aen_im_notify_work;
118}; 119};
119 120
121#define bfad_get_aen_entry(_drv, _entry) do { \
122 unsigned long _flags; \
123 spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags); \
124 bfa_q_deq(&(_drv)->free_aen_q, &(_entry)); \
125 if (_entry) \
126 list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q); \
127 spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags); \
128} while (0)
129
130/* post fc_host vendor event */
131#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
132 do_gettimeofday(&(_entry)->aen_tv); \
133 (_entry)->bfad_num = (_drv)->inst_no; \
134 (_entry)->seq_num = (_cnt); \
135 (_entry)->aen_category = (_cat); \
136 (_entry)->aen_type = (_evt); \
137 if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
138 queue_work((_drv)->im->drv_workq, \
139 &(_drv)->im->aen_im_notify_work); \
140} while (0)
141
120struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, 142struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
121 struct bfad_s *); 143 struct bfad_s *);
122bfa_status_t bfad_thread_workq(struct bfad_s *bfad); 144bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 1e258d5f8aec..b2ba0b2e91b2 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -784,6 +784,17 @@ enum bfi_sfp_i2h_e {
784}; 784};
785 785
786/* 786/*
787 * SFP state change notification
788 */
789struct bfi_sfp_scn_s {
790 struct bfi_mhdr_s mhr; /* host msg header */
791 u8 event;
792 u8 sfpid;
793 u8 pomlvl; /* pom level: normal/warning/alarm */
794 u8 is_elb; /* e-loopback */
795};
796
797/*
787 * SFP state 798 * SFP state
788 */ 799 */
789enum bfa_sfp_stat_e { 800enum bfa_sfp_stat_e {
@@ -926,6 +937,15 @@ struct bfi_flash_erase_rsp_s {
926}; 937};
927 938
928/* 939/*
940 * Flash event notification
941 */
942struct bfi_flash_event_s {
943 struct bfi_mhdr_s mh; /* Common msg header */
944 bfa_status_t status;
945 u32 param;
946};
947
948/*
929 *---------------------------------------------------------------------- 949 *----------------------------------------------------------------------
930 * DIAG 950 * DIAG
931 *---------------------------------------------------------------------- 951 *----------------------------------------------------------------------
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index d924236e1b91..42228ca5a9d2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -2,7 +2,7 @@
2#define _BNX2FC_H_ 2#define _BNX2FC_H_
3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. 3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
4 * 4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation 5 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -62,7 +62,7 @@
62#include "bnx2fc_constants.h" 62#include "bnx2fc_constants.h"
63 63
64#define BNX2FC_NAME "bnx2fc" 64#define BNX2FC_NAME "bnx2fc"
65#define BNX2FC_VERSION "1.0.3" 65#define BNX2FC_VERSION "1.0.4"
66 66
67#define PFX "bnx2fc: " 67#define PFX "bnx2fc: "
68 68
@@ -141,6 +141,10 @@
141 141
142#define BNX2FC_RNID_HBA 0x7 142#define BNX2FC_RNID_HBA 0x7
143 143
144#define SRR_RETRY_COUNT 5
145#define REC_RETRY_COUNT 1
146#define BNX2FC_NUM_ERR_BITS 63
147
144/* bnx2fc driver uses only one instance of fcoe_percpu_s */ 148/* bnx2fc driver uses only one instance of fcoe_percpu_s */
145extern struct fcoe_percpu_s bnx2fc_global; 149extern struct fcoe_percpu_s bnx2fc_global;
146 150
@@ -153,18 +157,13 @@ struct bnx2fc_percpu_s {
153}; 157};
154 158
155struct bnx2fc_hba { 159struct bnx2fc_hba {
156 struct list_head link; 160 struct list_head list;
157 struct cnic_dev *cnic; 161 struct cnic_dev *cnic;
158 struct pci_dev *pcidev; 162 struct pci_dev *pcidev;
159 struct net_device *netdev;
160 struct net_device *phys_dev; 163 struct net_device *phys_dev;
161 unsigned long reg_with_cnic; 164 unsigned long reg_with_cnic;
162 #define BNX2FC_CNIC_REGISTERED 1 165 #define BNX2FC_CNIC_REGISTERED 1
163 struct packet_type fcoe_packet_type;
164 struct packet_type fip_packet_type;
165 struct bnx2fc_cmd_mgr *cmd_mgr; 166 struct bnx2fc_cmd_mgr *cmd_mgr;
166 struct workqueue_struct *timer_work_queue;
167 struct kref kref;
168 spinlock_t hba_lock; 167 spinlock_t hba_lock;
169 struct mutex hba_mutex; 168 struct mutex hba_mutex;
170 unsigned long adapter_state; 169 unsigned long adapter_state;
@@ -172,15 +171,9 @@ struct bnx2fc_hba {
172 #define ADAPTER_STATE_GOING_DOWN 1 171 #define ADAPTER_STATE_GOING_DOWN 1
173 #define ADAPTER_STATE_LINK_DOWN 2 172 #define ADAPTER_STATE_LINK_DOWN 2
174 #define ADAPTER_STATE_READY 3 173 #define ADAPTER_STATE_READY 3
175 u32 flags; 174 unsigned long flags;
176 unsigned long init_done; 175 #define BNX2FC_FLAG_FW_INIT_DONE 0
177 #define BNX2FC_FW_INIT_DONE 0 176 #define BNX2FC_FLAG_DESTROY_CMPL 1
178 #define BNX2FC_CTLR_INIT_DONE 1
179 #define BNX2FC_CREATE_DONE 2
180 struct fcoe_ctlr ctlr;
181 struct list_head vports;
182 u8 vlan_enabled;
183 int vlan_id;
184 u32 next_conn_id; 177 u32 next_conn_id;
185 struct fcoe_task_ctx_entry **task_ctx; 178 struct fcoe_task_ctx_entry **task_ctx;
186 dma_addr_t *task_ctx_dma; 179 dma_addr_t *task_ctx_dma;
@@ -199,38 +192,41 @@ struct bnx2fc_hba {
199 char *dummy_buffer; 192 char *dummy_buffer;
200 dma_addr_t dummy_buf_dma; 193 dma_addr_t dummy_buf_dma;
201 194
195 /* Active list of offloaded sessions */
196 struct bnx2fc_rport **tgt_ofld_list;
197
198 /* statistics */
202 struct fcoe_statistics_params *stats_buffer; 199 struct fcoe_statistics_params *stats_buffer;
203 dma_addr_t stats_buf_dma; 200 dma_addr_t stats_buf_dma;
204 201 struct completion stat_req_done;
205 /*
206 * PCI related info.
207 */
208 u16 pci_did;
209 u16 pci_vid;
210 u16 pci_sdid;
211 u16 pci_svid;
212 u16 pci_func;
213 u16 pci_devno;
214
215 struct task_struct *l2_thread;
216
217 /* linkdown handling */
218 wait_queue_head_t shutdown_wait;
219 int wait_for_link_down;
220 202
221 /*destroy handling */ 203 /*destroy handling */
222 struct timer_list destroy_timer; 204 struct timer_list destroy_timer;
223 wait_queue_head_t destroy_wait; 205 wait_queue_head_t destroy_wait;
224 206
225 /* Active list of offloaded sessions */ 207 /* linkdown handling */
226 struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS]; 208 wait_queue_head_t shutdown_wait;
209 int wait_for_link_down;
227 int num_ofld_sess; 210 int num_ofld_sess;
211 struct list_head vports;
212};
228 213
229 /* statistics */ 214struct bnx2fc_interface {
230 struct completion stat_req_done; 215 struct list_head list;
216 unsigned long if_flags;
217 #define BNX2FC_CTLR_INIT_DONE 0
218 struct bnx2fc_hba *hba;
219 struct net_device *netdev;
220 struct packet_type fcoe_packet_type;
221 struct packet_type fip_packet_type;
222 struct workqueue_struct *timer_work_queue;
223 struct kref kref;
224 struct fcoe_ctlr ctlr;
225 u8 vlan_enabled;
226 int vlan_id;
231}; 227};
232 228
233#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr) 229#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
234 230
235struct bnx2fc_lport { 231struct bnx2fc_lport {
236 struct list_head list; 232 struct list_head list;
@@ -252,9 +248,11 @@ struct bnx2fc_rport {
252 struct fc_rport_priv *rdata; 248 struct fc_rport_priv *rdata;
253 void __iomem *ctx_base; 249 void __iomem *ctx_base;
254#define DPM_TRIGER_TYPE 0x40 250#define DPM_TRIGER_TYPE 0x40
251 u32 io_timeout;
255 u32 fcoe_conn_id; 252 u32 fcoe_conn_id;
256 u32 context_id; 253 u32 context_id;
257 u32 sid; 254 u32 sid;
255 int dev_type;
258 256
259 unsigned long flags; 257 unsigned long flags;
260#define BNX2FC_FLAG_SESSION_READY 0x1 258#define BNX2FC_FLAG_SESSION_READY 0x1
@@ -262,10 +260,9 @@ struct bnx2fc_rport {
262#define BNX2FC_FLAG_DISABLED 0x3 260#define BNX2FC_FLAG_DISABLED 0x3
263#define BNX2FC_FLAG_DESTROYED 0x4 261#define BNX2FC_FLAG_DESTROYED 0x4
264#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5 262#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
265#define BNX2FC_FLAG_DESTROY_CMPL 0x6 263#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6
266#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7 264#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
267#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8 265#define BNX2FC_FLAG_EXPL_LOGO 0x8
268#define BNX2FC_FLAG_EXPL_LOGO 0x9
269 266
270 u8 src_addr[ETH_ALEN]; 267 u8 src_addr[ETH_ALEN];
271 u32 max_sqes; 268 u32 max_sqes;
@@ -327,12 +324,9 @@ struct bnx2fc_rport {
327 spinlock_t cq_lock; 324 spinlock_t cq_lock;
328 atomic_t num_active_ios; 325 atomic_t num_active_ios;
329 u32 flush_in_prog; 326 u32 flush_in_prog;
330 unsigned long work_time_slice;
331 unsigned long timestamp; 327 unsigned long timestamp;
332 struct list_head free_task_list; 328 struct list_head free_task_list;
333 struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; 329 struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
334 atomic_t pi;
335 atomic_t ci;
336 struct list_head active_cmd_queue; 330 struct list_head active_cmd_queue;
337 struct list_head els_queue; 331 struct list_head els_queue;
338 struct list_head io_retire_queue; 332 struct list_head io_retire_queue;
@@ -367,6 +361,8 @@ struct bnx2fc_els_cb_arg {
367 struct bnx2fc_cmd *aborted_io_req; 361 struct bnx2fc_cmd *aborted_io_req;
368 struct bnx2fc_cmd *io_req; 362 struct bnx2fc_cmd *io_req;
369 u16 l2_oxid; 363 u16 l2_oxid;
364 u32 offset;
365 enum fc_rctl r_ctl;
370}; 366};
371 367
372/* bnx2fc command structure */ 368/* bnx2fc command structure */
@@ -380,6 +376,7 @@ struct bnx2fc_cmd {
380#define BNX2FC_ABTS 3 376#define BNX2FC_ABTS 3
381#define BNX2FC_ELS 4 377#define BNX2FC_ELS 4
382#define BNX2FC_CLEANUP 5 378#define BNX2FC_CLEANUP 5
379#define BNX2FC_SEQ_CLEANUP 6
383 u8 io_req_flags; 380 u8 io_req_flags;
384 struct kref refcount; 381 struct kref refcount;
385 struct fcoe_port *port; 382 struct fcoe_port *port;
@@ -393,6 +390,7 @@ struct bnx2fc_cmd {
393 struct completion tm_done; 390 struct completion tm_done;
394 int wait_for_comp; 391 int wait_for_comp;
395 u16 xid; 392 u16 xid;
393 struct fcoe_err_report_entry err_entry;
396 struct fcoe_task_ctx_entry *task; 394 struct fcoe_task_ctx_entry *task;
397 struct io_bdt *bd_tbl; 395 struct io_bdt *bd_tbl;
398 struct fcp_rsp *rsp; 396 struct fcp_rsp *rsp;
@@ -409,6 +407,12 @@ struct bnx2fc_cmd {
409#define BNX2FC_FLAG_IO_COMPL 0x9 407#define BNX2FC_FLAG_IO_COMPL 0x9
410#define BNX2FC_FLAG_ELS_DONE 0xa 408#define BNX2FC_FLAG_ELS_DONE 0xa
411#define BNX2FC_FLAG_ELS_TIMEOUT 0xb 409#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
410#define BNX2FC_FLAG_CMD_LOST 0xc
411#define BNX2FC_FLAG_SRR_SENT 0xd
412 u8 rec_retry;
413 u8 srr_retry;
414 u32 srr_offset;
415 u8 srr_rctl;
412 u32 fcp_resid; 416 u32 fcp_resid;
413 u32 fcp_rsp_len; 417 u32 fcp_rsp_len;
414 u32 fcp_sns_len; 418 u32 fcp_sns_len;
@@ -439,6 +443,7 @@ struct bnx2fc_unsol_els {
439 443
440 444
441 445
446struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
442struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type); 447struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
443void bnx2fc_cmd_release(struct kref *ref); 448void bnx2fc_cmd_release(struct kref *ref);
444int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd); 449int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
@@ -476,6 +481,10 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
476void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, 481void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
477 struct fcoe_task_ctx_entry *task, 482 struct fcoe_task_ctx_entry *task,
478 u16 orig_xid); 483 u16 orig_xid);
484void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req,
485 struct fcoe_task_ctx_entry *task,
486 struct bnx2fc_cmd *orig_io_req,
487 u32 offset);
479void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, 488void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
480 struct fcoe_task_ctx_entry *task); 489 struct fcoe_task_ctx_entry *task);
481void bnx2fc_init_task(struct bnx2fc_cmd *io_req, 490void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
@@ -525,5 +534,13 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
525 unsigned char *buf, 534 unsigned char *buf,
526 u32 frame_len, u16 l2_oxid); 535 u32 frame_len, u16 l2_oxid);
527int bnx2fc_send_stat_req(struct bnx2fc_hba *hba); 536int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
537int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req);
538int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
539int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
540void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
541 struct fcoe_task_ctx_entry *task,
542 u8 rx_state);
543int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
544 enum fc_rctl r_ctl);
528 545
529#endif 546#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 7f6aff68cc53..3416d9a746c7 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -21,21 +21,21 @@ extern unsigned int bnx2fc_debug_level;
21 21
22#define BNX2FC_ELS_DBG(fmt, arg...) \ 22#define BNX2FC_ELS_DBG(fmt, arg...) \
23 BNX2FC_CHK_LOGGING(LOG_ELS, \ 23 BNX2FC_CHK_LOGGING(LOG_ELS, \
24 printk(KERN_ALERT PFX fmt, ##arg)) 24 printk(KERN_INFO PFX fmt, ##arg))
25 25
26#define BNX2FC_MISC_DBG(fmt, arg...) \ 26#define BNX2FC_MISC_DBG(fmt, arg...) \
27 BNX2FC_CHK_LOGGING(LOG_MISC, \ 27 BNX2FC_CHK_LOGGING(LOG_MISC, \
28 printk(KERN_ALERT PFX fmt, ##arg)) 28 printk(KERN_INFO PFX fmt, ##arg))
29 29
30#define BNX2FC_IO_DBG(io_req, fmt, arg...) \ 30#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
31 do { \ 31 do { \
32 if (!io_req || !io_req->port || !io_req->port->lport || \ 32 if (!io_req || !io_req->port || !io_req->port->lport || \
33 !io_req->port->lport->host) \ 33 !io_req->port->lport->host) \
34 BNX2FC_CHK_LOGGING(LOG_IO, \ 34 BNX2FC_CHK_LOGGING(LOG_IO, \
35 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ 35 printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
36 else \ 36 else \
37 BNX2FC_CHK_LOGGING(LOG_IO, \ 37 BNX2FC_CHK_LOGGING(LOG_IO, \
38 shost_printk(KERN_ALERT, \ 38 shost_printk(KERN_INFO, \
39 (io_req)->port->lport->host, \ 39 (io_req)->port->lport->host, \
40 PFX "xid:0x%x " fmt, \ 40 PFX "xid:0x%x " fmt, \
41 (io_req)->xid, ##arg)); \ 41 (io_req)->xid, ##arg)); \
@@ -46,10 +46,10 @@ extern unsigned int bnx2fc_debug_level;
46 if (!tgt || !tgt->port || !tgt->port->lport || \ 46 if (!tgt || !tgt->port || !tgt->port->lport || \
47 !tgt->port->lport->host || !tgt->rport) \ 47 !tgt->port->lport->host || !tgt->rport) \
48 BNX2FC_CHK_LOGGING(LOG_TGT, \ 48 BNX2FC_CHK_LOGGING(LOG_TGT, \
49 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ 49 printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
50 else \ 50 else \
51 BNX2FC_CHK_LOGGING(LOG_TGT, \ 51 BNX2FC_CHK_LOGGING(LOG_TGT, \
52 shost_printk(KERN_ALERT, \ 52 shost_printk(KERN_INFO, \
53 (tgt)->port->lport->host, \ 53 (tgt)->port->lport->host, \
54 PFX "port:%x " fmt, \ 54 PFX "port:%x " fmt, \
55 (tgt)->rport->port_id, ##arg)); \ 55 (tgt)->rport->port_id, ##arg)); \
@@ -60,10 +60,10 @@ extern unsigned int bnx2fc_debug_level;
60 do { \ 60 do { \
61 if (!lport || !lport->host) \ 61 if (!lport || !lport->host) \
62 BNX2FC_CHK_LOGGING(LOG_HBA, \ 62 BNX2FC_CHK_LOGGING(LOG_HBA, \
63 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ 63 printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
64 else \ 64 else \
65 BNX2FC_CHK_LOGGING(LOG_HBA, \ 65 BNX2FC_CHK_LOGGING(LOG_HBA, \
66 shost_printk(KERN_ALERT, lport->host, \ 66 shost_printk(KERN_INFO, lport->host, \
67 PFX fmt, ##arg)); \ 67 PFX fmt, ##arg)); \
68 } while (0) 68 } while (0)
69 69
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 7e89143f15cf..d66dcbd0df10 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -3,7 +3,7 @@
3 * This file contains helper routines that handle ELS requests 3 * This file contains helper routines that handle ELS requests
4 * and responses. 4 * and responses.
5 * 5 *
6 * Copyright (c) 2008 - 2010 Broadcom Corporation 6 * Copyright (c) 2008 - 2011 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -253,13 +253,417 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
253 return rc; 253 return rc;
254} 254}
255 255
256void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
257{
258 struct bnx2fc_mp_req *mp_req;
259 struct fc_frame_header *fc_hdr, *fh;
260 struct bnx2fc_cmd *srr_req;
261 struct bnx2fc_cmd *orig_io_req;
262 struct fc_frame *fp;
263 unsigned char *buf;
264 void *resp_buf;
265 u32 resp_len, hdr_len;
266 u8 opcode;
267 int rc = 0;
268
269 orig_io_req = cb_arg->aborted_io_req;
270 srr_req = cb_arg->io_req;
271 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
272 BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
273 orig_io_req->xid);
274 goto srr_compl_done;
275 }
276 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
277 BNX2FC_IO_DBG(srr_req, "rec abts in prog "
278 "orig_io - 0x%x\n",
279 orig_io_req->xid);
280 goto srr_compl_done;
281 }
282 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
283 /* SRR timedout */
284 BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
285 "orig_io - 0x%x\n",
286 orig_io_req->xid);
287 rc = bnx2fc_initiate_abts(srr_req);
288 if (rc != SUCCESS) {
289 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
290 "failed. issue cleanup\n");
291 bnx2fc_initiate_cleanup(srr_req);
292 }
293 orig_io_req->srr_retry++;
294 if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
295 struct bnx2fc_rport *tgt = orig_io_req->tgt;
296 spin_unlock_bh(&tgt->tgt_lock);
297 rc = bnx2fc_send_srr(orig_io_req,
298 orig_io_req->srr_offset,
299 orig_io_req->srr_rctl);
300 spin_lock_bh(&tgt->tgt_lock);
301 if (!rc)
302 goto srr_compl_done;
303 }
304
305 rc = bnx2fc_initiate_abts(orig_io_req);
306 if (rc != SUCCESS) {
307 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
308 "failed xid = 0x%x. issue cleanup\n",
309 orig_io_req->xid);
310 bnx2fc_initiate_cleanup(orig_io_req);
311 }
312 goto srr_compl_done;
313 }
314 mp_req = &(srr_req->mp_req);
315 fc_hdr = &(mp_req->resp_fc_hdr);
316 resp_len = mp_req->resp_len;
317 resp_buf = mp_req->resp_buf;
318
319 hdr_len = sizeof(*fc_hdr);
320 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
321 if (!buf) {
322 printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
323 goto srr_compl_done;
324 }
325 memcpy(buf, fc_hdr, hdr_len);
326 memcpy(buf + hdr_len, resp_buf, resp_len);
327
328 fp = fc_frame_alloc(NULL, resp_len);
329 if (!fp) {
330 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
331 goto free_buf;
332 }
333
334 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
335 /* Copy FC Frame header and payload into the frame */
336 memcpy(fh, buf, hdr_len + resp_len);
337
338 opcode = fc_frame_payload_op(fp);
339 switch (opcode) {
340 case ELS_LS_ACC:
341 BNX2FC_IO_DBG(srr_req, "SRR success\n");
342 break;
343 case ELS_LS_RJT:
344 BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
345 rc = bnx2fc_initiate_abts(orig_io_req);
346 if (rc != SUCCESS) {
347 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
348 "failed xid = 0x%x. issue cleanup\n",
349 orig_io_req->xid);
350 bnx2fc_initiate_cleanup(orig_io_req);
351 }
352 break;
353 default:
354 BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
355 opcode);
356 break;
357 }
358 fc_frame_free(fp);
359free_buf:
360 kfree(buf);
361srr_compl_done:
362 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
363}
364
365void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
366{
367 struct bnx2fc_cmd *orig_io_req, *new_io_req;
368 struct bnx2fc_cmd *rec_req;
369 struct bnx2fc_mp_req *mp_req;
370 struct fc_frame_header *fc_hdr, *fh;
371 struct fc_els_ls_rjt *rjt;
372 struct fc_els_rec_acc *acc;
373 struct bnx2fc_rport *tgt;
374 struct fcoe_err_report_entry *err_entry;
375 struct scsi_cmnd *sc_cmd;
376 enum fc_rctl r_ctl;
377 unsigned char *buf;
378 void *resp_buf;
379 struct fc_frame *fp;
380 u8 opcode;
381 u32 offset;
382 u32 e_stat;
383 u32 resp_len, hdr_len;
384 int rc = 0;
385 bool send_seq_clnp = false;
386 bool abort_io = false;
387
388 BNX2FC_MISC_DBG("Entered rec_compl callback\n");
389 rec_req = cb_arg->io_req;
390 orig_io_req = cb_arg->aborted_io_req;
391 BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
392 tgt = orig_io_req->tgt;
393
394 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
395 BNX2FC_IO_DBG(rec_req, "completed"
396 "orig_io - 0x%x\n",
397 orig_io_req->xid);
398 goto rec_compl_done;
399 }
400 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
401 BNX2FC_IO_DBG(rec_req, "abts in prog "
402 "orig_io - 0x%x\n",
403 orig_io_req->xid);
404 goto rec_compl_done;
405 }
406 /* Handle REC timeout case */
407 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
408 BNX2FC_IO_DBG(rec_req, "timed out, abort "
409 "orig_io - 0x%x\n",
410 orig_io_req->xid);
411 /* els req is timed out. send abts for els */
412 rc = bnx2fc_initiate_abts(rec_req);
413 if (rc != SUCCESS) {
414 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
415 "failed. issue cleanup\n");
416 bnx2fc_initiate_cleanup(rec_req);
417 }
418 orig_io_req->rec_retry++;
419 /* REC timedout. send ABTS to the orig IO req */
420 if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
421 spin_unlock_bh(&tgt->tgt_lock);
422 rc = bnx2fc_send_rec(orig_io_req);
423 spin_lock_bh(&tgt->tgt_lock);
424 if (!rc)
425 goto rec_compl_done;
426 }
427 rc = bnx2fc_initiate_abts(orig_io_req);
428 if (rc != SUCCESS) {
429 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
430 "failed xid = 0x%x. issue cleanup\n",
431 orig_io_req->xid);
432 bnx2fc_initiate_cleanup(orig_io_req);
433 }
434 goto rec_compl_done;
435 }
436 mp_req = &(rec_req->mp_req);
437 fc_hdr = &(mp_req->resp_fc_hdr);
438 resp_len = mp_req->resp_len;
439 acc = resp_buf = mp_req->resp_buf;
440
441 hdr_len = sizeof(*fc_hdr);
442
443 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
444 if (!buf) {
445 printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
446 goto rec_compl_done;
447 }
448 memcpy(buf, fc_hdr, hdr_len);
449 memcpy(buf + hdr_len, resp_buf, resp_len);
450
451 fp = fc_frame_alloc(NULL, resp_len);
452 if (!fp) {
453 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
454 goto free_buf;
455 }
456
457 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
458 /* Copy FC Frame header and payload into the frame */
459 memcpy(fh, buf, hdr_len + resp_len);
460
461 opcode = fc_frame_payload_op(fp);
462 if (opcode == ELS_LS_RJT) {
463 BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
464 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
465 if ((rjt->er_reason == ELS_RJT_LOGIC ||
466 rjt->er_reason == ELS_RJT_UNAB) &&
467 rjt->er_explan == ELS_EXPL_OXID_RXID) {
468 BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
469 new_io_req = bnx2fc_cmd_alloc(tgt);
470 if (!new_io_req)
471 goto abort_io;
472 new_io_req->sc_cmd = orig_io_req->sc_cmd;
473 /* cleanup orig_io_req that is with the FW */
474 set_bit(BNX2FC_FLAG_CMD_LOST,
475 &orig_io_req->req_flags);
476 bnx2fc_initiate_cleanup(orig_io_req);
477 /* Post a new IO req with the same sc_cmd */
478 BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
479 spin_unlock_bh(&tgt->tgt_lock);
480 rc = bnx2fc_post_io_req(tgt, new_io_req);
481 spin_lock_bh(&tgt->tgt_lock);
482 if (!rc)
483 goto free_frame;
484 BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
485 }
486abort_io:
487 rc = bnx2fc_initiate_abts(orig_io_req);
488 if (rc != SUCCESS) {
489 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
490 "failed. issue cleanup\n");
491 bnx2fc_initiate_cleanup(orig_io_req);
492 }
493 } else if (opcode == ELS_LS_ACC) {
494 /* REVISIT: Check if the exchange is already aborted */
495 offset = ntohl(acc->reca_fc4value);
496 e_stat = ntohl(acc->reca_e_stat);
497 if (e_stat & ESB_ST_SEQ_INIT) {
498 BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
499 goto free_frame;
500 }
501 BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
502 e_stat, offset);
503 /* Seq initiative is with us */
504 err_entry = (struct fcoe_err_report_entry *)
505 &orig_io_req->err_entry;
506 sc_cmd = orig_io_req->sc_cmd;
507 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
508 /* SCSI WRITE command */
509 if (offset == orig_io_req->data_xfer_len) {
510 BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
511 /* FCP_RSP lost */
512 r_ctl = FC_RCTL_DD_CMD_STATUS;
513 offset = 0;
514 } else {
515 /* start transmitting from offset */
516 BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
517 send_seq_clnp = true;
518 r_ctl = FC_RCTL_DD_DATA_DESC;
519 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
520 offset, r_ctl))
521 abort_io = true;
522 /* XFER_RDY */
523 }
524 } else {
525 /* SCSI READ command */
526 if (err_entry->data.rx_buf_off ==
527 orig_io_req->data_xfer_len) {
528 /* FCP_RSP lost */
529 BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
530 r_ctl = FC_RCTL_DD_CMD_STATUS;
531 offset = 0;
532 } else {
533 /* request retransmission from this offset */
534 send_seq_clnp = true;
535 offset = err_entry->data.rx_buf_off;
536 BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
537 /* FCP_DATA lost */
538 r_ctl = FC_RCTL_DD_SOL_DATA;
539 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
540 offset, r_ctl))
541 abort_io = true;
542 }
543 }
544 if (abort_io) {
545 rc = bnx2fc_initiate_abts(orig_io_req);
546 if (rc != SUCCESS) {
547 BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
548 " failed. issue cleanup\n");
549 bnx2fc_initiate_cleanup(orig_io_req);
550 }
551 } else if (!send_seq_clnp) {
552 BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
553 spin_unlock_bh(&tgt->tgt_lock);
554 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
555 spin_lock_bh(&tgt->tgt_lock);
556
557 if (rc) {
558 BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
559 " IO will abort\n");
560 }
561 }
562 }
563free_frame:
564 fc_frame_free(fp);
565free_buf:
566 kfree(buf);
567rec_compl_done:
568 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
569 kfree(cb_arg);
570}
571
572int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
573{
574 struct fc_els_rec rec;
575 struct bnx2fc_rport *tgt = orig_io_req->tgt;
576 struct fc_lport *lport = tgt->rdata->local_port;
577 struct bnx2fc_els_cb_arg *cb_arg = NULL;
578 u32 sid = tgt->sid;
579 u32 r_a_tov = lport->r_a_tov;
580 int rc;
581
582 BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
583 memset(&rec, 0, sizeof(rec));
584
585 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
586 if (!cb_arg) {
587 printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
588 rc = -ENOMEM;
589 goto rec_err;
590 }
591 kref_get(&orig_io_req->refcount);
592
593 cb_arg->aborted_io_req = orig_io_req;
594
595 rec.rec_cmd = ELS_REC;
596 hton24(rec.rec_s_id, sid);
597 rec.rec_ox_id = htons(orig_io_req->xid);
598 rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
599
600 rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
601 bnx2fc_rec_compl, cb_arg,
602 r_a_tov);
603rec_err:
604 if (rc) {
605 BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
606 spin_lock_bh(&tgt->tgt_lock);
607 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
608 spin_unlock_bh(&tgt->tgt_lock);
609 kfree(cb_arg);
610 }
611 return rc;
612}
613
614int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
615{
616 struct fcp_srr srr;
617 struct bnx2fc_rport *tgt = orig_io_req->tgt;
618 struct fc_lport *lport = tgt->rdata->local_port;
619 struct bnx2fc_els_cb_arg *cb_arg = NULL;
620 u32 r_a_tov = lport->r_a_tov;
621 int rc;
622
623 BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
624 memset(&srr, 0, sizeof(srr));
625
626 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
627 if (!cb_arg) {
628 printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
629 rc = -ENOMEM;
630 goto srr_err;
631 }
632 kref_get(&orig_io_req->refcount);
633
634 cb_arg->aborted_io_req = orig_io_req;
635
636 srr.srr_op = ELS_SRR;
637 srr.srr_ox_id = htons(orig_io_req->xid);
638 srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
639 srr.srr_rel_off = htonl(offset);
640 srr.srr_r_ctl = r_ctl;
641 orig_io_req->srr_offset = offset;
642 orig_io_req->srr_rctl = r_ctl;
643
644 rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
645 bnx2fc_srr_compl, cb_arg,
646 r_a_tov);
647srr_err:
648 if (rc) {
649 BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
650 spin_lock_bh(&tgt->tgt_lock);
651 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
652 spin_unlock_bh(&tgt->tgt_lock);
653 kfree(cb_arg);
654 } else
655 set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
656
657 return rc;
658}
659
256static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, 660static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
257 void *data, u32 data_len, 661 void *data, u32 data_len,
258 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), 662 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
259 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec) 663 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
260{ 664{
261 struct fcoe_port *port = tgt->port; 665 struct fcoe_port *port = tgt->port;
262 struct bnx2fc_hba *hba = port->priv; 666 struct bnx2fc_interface *interface = port->priv;
263 struct fc_rport *rport = tgt->rport; 667 struct fc_rport *rport = tgt->rport;
264 struct fc_lport *lport = port->lport; 668 struct fc_lport *lport = port->lport;
265 struct bnx2fc_cmd *els_req; 669 struct bnx2fc_cmd *els_req;
@@ -274,12 +678,12 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
274 678
275 rc = fc_remote_port_chkready(rport); 679 rc = fc_remote_port_chkready(rport);
276 if (rc) { 680 if (rc) {
277 printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op); 681 printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
278 rc = -EINVAL; 682 rc = -EINVAL;
279 goto els_err; 683 goto els_err;
280 } 684 }
281 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 685 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
282 printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op); 686 printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
283 rc = -EINVAL; 687 rc = -EINVAL;
284 goto els_err; 688 goto els_err;
285 } 689 }
@@ -305,7 +709,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
305 mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req); 709 mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
306 rc = bnx2fc_init_mp_req(els_req); 710 rc = bnx2fc_init_mp_req(els_req);
307 if (rc == FAILED) { 711 if (rc == FAILED) {
308 printk(KERN_ALERT PFX "ELS MP request init failed\n"); 712 printk(KERN_ERR PFX "ELS MP request init failed\n");
309 spin_lock_bh(&tgt->tgt_lock); 713 spin_lock_bh(&tgt->tgt_lock);
310 kref_put(&els_req->refcount, bnx2fc_cmd_release); 714 kref_put(&els_req->refcount, bnx2fc_cmd_release);
311 spin_unlock_bh(&tgt->tgt_lock); 715 spin_unlock_bh(&tgt->tgt_lock);
@@ -324,7 +728,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
324 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { 728 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
325 memcpy(mp_req->req_buf, data, data_len); 729 memcpy(mp_req->req_buf, data, data_len);
326 } else { 730 } else {
327 printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op); 731 printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
328 els_req->cb_func = NULL; 732 els_req->cb_func = NULL;
329 els_req->cb_arg = NULL; 733 els_req->cb_arg = NULL;
330 spin_lock_bh(&tgt->tgt_lock); 734 spin_lock_bh(&tgt->tgt_lock);
@@ -342,9 +746,14 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
342 did = tgt->rport->port_id; 746 did = tgt->rport->port_id;
343 sid = tgt->sid; 747 sid = tgt->sid;
344 748
345 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, 749 if (op == ELS_SRR)
346 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 750 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
347 FC_FC_SEQ_INIT, 0); 751 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
752 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
753 else
754 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
755 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
756 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
348 757
349 /* Obtain exchange id */ 758 /* Obtain exchange id */
350 xid = els_req->xid; 759 xid = els_req->xid;
@@ -352,7 +761,8 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
352 index = xid % BNX2FC_TASKS_PER_PAGE; 761 index = xid % BNX2FC_TASKS_PER_PAGE;
353 762
354 /* Initialize task context for this IO request */ 763 /* Initialize task context for this IO request */
355 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 764 task_page = (struct fcoe_task_ctx_entry *)
765 interface->hba->task_ctx[task_idx];
356 task = &(task_page[index]); 766 task = &(task_page[index]);
357 bnx2fc_init_mp_task(els_req, task); 767 bnx2fc_init_mp_task(els_req, task);
358 768
@@ -496,8 +906,8 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
496 void *arg, u32 timeout) 906 void *arg, u32 timeout)
497{ 907{
498 struct fcoe_port *port = lport_priv(lport); 908 struct fcoe_port *port = lport_priv(lport);
499 struct bnx2fc_hba *hba = port->priv; 909 struct bnx2fc_interface *interface = port->priv;
500 struct fcoe_ctlr *fip = &hba->ctlr; 910 struct fcoe_ctlr *fip = &interface->ctlr;
501 struct fc_frame_header *fh = fc_frame_header_get(fp); 911 struct fc_frame_header *fh = fc_frame_header_get(fp);
502 912
503 switch (op) { 913 switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a97aff3a0662..7cb2cd48b17b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -3,7 +3,7 @@
3 * cnic modules to create FCoE instances, send/receive non-offloaded 3 * cnic modules to create FCoE instances, send/receive non-offloaded
4 * FIP/FCoE packets, listen to link events etc. 4 * FIP/FCoE packets, listen to link events etc.
5 * 5 *
6 * Copyright (c) 2008 - 2010 Broadcom Corporation 6 * Copyright (c) 2008 - 2011 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -15,13 +15,14 @@
15#include "bnx2fc.h" 15#include "bnx2fc.h"
16 16
17static struct list_head adapter_list; 17static struct list_head adapter_list;
18static struct list_head if_list;
18static u32 adapter_count; 19static u32 adapter_count;
19static DEFINE_MUTEX(bnx2fc_dev_lock); 20static DEFINE_MUTEX(bnx2fc_dev_lock);
20DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); 21DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
21 22
22#define DRV_MODULE_NAME "bnx2fc" 23#define DRV_MODULE_NAME "bnx2fc"
23#define DRV_MODULE_VERSION BNX2FC_VERSION 24#define DRV_MODULE_VERSION BNX2FC_VERSION
24#define DRV_MODULE_RELDATE "Jun 10, 2011" 25#define DRV_MODULE_RELDATE "Jun 23, 2011"
25 26
26 27
27static char version[] __devinitdata = 28static char version[] __devinitdata =
@@ -61,7 +62,7 @@ static int bnx2fc_disable(struct net_device *netdev);
61 62
62static void bnx2fc_recv_frame(struct sk_buff *skb); 63static void bnx2fc_recv_frame(struct sk_buff *skb);
63 64
64static void bnx2fc_start_disc(struct bnx2fc_hba *hba); 65static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
65static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); 66static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
66static int bnx2fc_net_config(struct fc_lport *lp); 67static int bnx2fc_net_config(struct fc_lport *lp);
67static int bnx2fc_lport_config(struct fc_lport *lport); 68static int bnx2fc_lport_config(struct fc_lport *lport);
@@ -70,18 +71,20 @@ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
70static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); 71static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
71static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); 72static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
72static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); 73static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
73static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, 74static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
74 struct device *parent, int npiv); 75 struct device *parent, int npiv);
75static void bnx2fc_destroy_work(struct work_struct *work); 76static void bnx2fc_destroy_work(struct work_struct *work);
76 77
77static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); 78static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
79static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
80 *phys_dev);
78static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); 81static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
79 82
80static int bnx2fc_fw_init(struct bnx2fc_hba *hba); 83static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
81static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba); 84static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
82 85
83static void bnx2fc_port_shutdown(struct fc_lport *lport); 86static void bnx2fc_port_shutdown(struct fc_lport *lport);
84static void bnx2fc_stop(struct bnx2fc_hba *hba); 87static void bnx2fc_stop(struct bnx2fc_interface *interface);
85static int __init bnx2fc_mod_init(void); 88static int __init bnx2fc_mod_init(void);
86static void __exit bnx2fc_mod_exit(void); 89static void __exit bnx2fc_mod_exit(void);
87 90
@@ -142,7 +145,8 @@ static void bnx2fc_abort_io(struct fc_lport *lport)
142static void bnx2fc_cleanup(struct fc_lport *lport) 145static void bnx2fc_cleanup(struct fc_lport *lport)
143{ 146{
144 struct fcoe_port *port = lport_priv(lport); 147 struct fcoe_port *port = lport_priv(lport);
145 struct bnx2fc_hba *hba = port->priv; 148 struct bnx2fc_interface *interface = port->priv;
149 struct bnx2fc_hba *hba = interface->hba;
146 struct bnx2fc_rport *tgt; 150 struct bnx2fc_rport *tgt;
147 int i; 151 int i;
148 152
@@ -219,7 +223,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
219 struct fcoe_crc_eof *cp; 223 struct fcoe_crc_eof *cp;
220 struct sk_buff *skb; 224 struct sk_buff *skb;
221 struct fc_frame_header *fh; 225 struct fc_frame_header *fh;
222 struct bnx2fc_hba *hba; 226 struct bnx2fc_interface *interface;
227 struct bnx2fc_hba *hba;
223 struct fcoe_port *port; 228 struct fcoe_port *port;
224 struct fcoe_hdr *hp; 229 struct fcoe_hdr *hp;
225 struct bnx2fc_rport *tgt; 230 struct bnx2fc_rport *tgt;
@@ -230,7 +235,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
230 int wlen, rc = 0; 235 int wlen, rc = 0;
231 236
232 port = (struct fcoe_port *)lport_priv(lport); 237 port = (struct fcoe_port *)lport_priv(lport);
233 hba = port->priv; 238 interface = port->priv;
239 hba = interface->hba;
234 240
235 fh = fc_frame_header_get(fp); 241 fh = fc_frame_header_get(fp);
236 242
@@ -242,12 +248,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
242 } 248 }
243 249
244 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { 250 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
245 if (!hba->ctlr.sel_fcf) { 251 if (!interface->ctlr.sel_fcf) {
246 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); 252 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
247 kfree_skb(skb); 253 kfree_skb(skb);
248 return -EINVAL; 254 return -EINVAL;
249 } 255 }
250 if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb)) 256 if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
251 return 0; 257 return 0;
252 } 258 }
253 259
@@ -316,19 +322,19 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
316 skb_reset_network_header(skb); 322 skb_reset_network_header(skb);
317 skb->mac_len = elen; 323 skb->mac_len = elen;
318 skb->protocol = htons(ETH_P_FCOE); 324 skb->protocol = htons(ETH_P_FCOE);
319 skb->dev = hba->netdev; 325 skb->dev = interface->netdev;
320 326
321 /* fill up mac and fcoe headers */ 327 /* fill up mac and fcoe headers */
322 eh = eth_hdr(skb); 328 eh = eth_hdr(skb);
323 eh->h_proto = htons(ETH_P_FCOE); 329 eh->h_proto = htons(ETH_P_FCOE);
324 if (hba->ctlr.map_dest) 330 if (interface->ctlr.map_dest)
325 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 331 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
326 else 332 else
327 /* insert GW address */ 333 /* insert GW address */
328 memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN); 334 memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
329 335
330 if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 336 if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
331 memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN); 337 memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
332 else 338 else
333 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 339 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
334 340
@@ -377,22 +383,23 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
377 struct packet_type *ptype, struct net_device *olddev) 383 struct packet_type *ptype, struct net_device *olddev)
378{ 384{
379 struct fc_lport *lport; 385 struct fc_lport *lport;
380 struct bnx2fc_hba *hba; 386 struct bnx2fc_interface *interface;
381 struct fc_frame_header *fh; 387 struct fc_frame_header *fh;
382 struct fcoe_rcv_info *fr; 388 struct fcoe_rcv_info *fr;
383 struct fcoe_percpu_s *bg; 389 struct fcoe_percpu_s *bg;
384 unsigned short oxid; 390 unsigned short oxid;
385 391
386 hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type); 392 interface = container_of(ptype, struct bnx2fc_interface,
387 lport = hba->ctlr.lp; 393 fcoe_packet_type);
394 lport = interface->ctlr.lp;
388 395
389 if (unlikely(lport == NULL)) { 396 if (unlikely(lport == NULL)) {
390 printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n"); 397 printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
391 goto err; 398 goto err;
392 } 399 }
393 400
394 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 401 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
395 printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n"); 402 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
396 goto err; 403 goto err;
397 } 404 }
398 405
@@ -411,7 +418,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
411 418
412 fr = fcoe_dev_from_skb(skb); 419 fr = fcoe_dev_from_skb(skb);
413 fr->fr_dev = lport; 420 fr->fr_dev = lport;
414 fr->ptype = ptype;
415 421
416 bg = &bnx2fc_global; 422 bg = &bnx2fc_global;
417 spin_lock_bh(&bg->fcoe_rx_list.lock); 423 spin_lock_bh(&bg->fcoe_rx_list.lock);
@@ -469,7 +475,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
469 fr = fcoe_dev_from_skb(skb); 475 fr = fcoe_dev_from_skb(skb);
470 lport = fr->fr_dev; 476 lport = fr->fr_dev;
471 if (unlikely(lport == NULL)) { 477 if (unlikely(lport == NULL)) {
472 printk(KERN_ALERT PFX "Invalid lport struct\n"); 478 printk(KERN_ERR PFX "Invalid lport struct\n");
473 kfree_skb(skb); 479 kfree_skb(skb);
474 return; 480 return;
475 } 481 }
@@ -594,7 +600,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
594 struct fc_host_statistics *bnx2fc_stats; 600 struct fc_host_statistics *bnx2fc_stats;
595 struct fc_lport *lport = shost_priv(shost); 601 struct fc_lport *lport = shost_priv(shost);
596 struct fcoe_port *port = lport_priv(lport); 602 struct fcoe_port *port = lport_priv(lport);
597 struct bnx2fc_hba *hba = port->priv; 603 struct bnx2fc_interface *interface = port->priv;
604 struct bnx2fc_hba *hba = interface->hba;
598 struct fcoe_statistics_params *fw_stats; 605 struct fcoe_statistics_params *fw_stats;
599 int rc = 0; 606 int rc = 0;
600 607
@@ -631,7 +638,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
631static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) 638static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
632{ 639{
633 struct fcoe_port *port = lport_priv(lport); 640 struct fcoe_port *port = lport_priv(lport);
634 struct bnx2fc_hba *hba = port->priv; 641 struct bnx2fc_interface *interface = port->priv;
635 struct Scsi_Host *shost = lport->host; 642 struct Scsi_Host *shost = lport->host;
636 int rc = 0; 643 int rc = 0;
637 644
@@ -654,7 +661,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
654 fc_host_max_npiv_vports(lport->host) = USHRT_MAX; 661 fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
655 sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", 662 sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
656 BNX2FC_NAME, BNX2FC_VERSION, 663 BNX2FC_NAME, BNX2FC_VERSION,
657 hba->netdev->name); 664 interface->netdev->name);
658 665
659 return 0; 666 return 0;
660} 667}
@@ -662,8 +669,8 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
662static void bnx2fc_link_speed_update(struct fc_lport *lport) 669static void bnx2fc_link_speed_update(struct fc_lport *lport)
663{ 670{
664 struct fcoe_port *port = lport_priv(lport); 671 struct fcoe_port *port = lport_priv(lport);
665 struct bnx2fc_hba *hba = port->priv; 672 struct bnx2fc_interface *interface = port->priv;
666 struct net_device *netdev = hba->netdev; 673 struct net_device *netdev = interface->netdev;
667 struct ethtool_cmd ecmd; 674 struct ethtool_cmd ecmd;
668 675
669 if (!dev_ethtool_get_settings(netdev, &ecmd)) { 676 if (!dev_ethtool_get_settings(netdev, &ecmd)) {
@@ -691,7 +698,8 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
691static int bnx2fc_link_ok(struct fc_lport *lport) 698static int bnx2fc_link_ok(struct fc_lport *lport)
692{ 699{
693 struct fcoe_port *port = lport_priv(lport); 700 struct fcoe_port *port = lport_priv(lport);
694 struct bnx2fc_hba *hba = port->priv; 701 struct bnx2fc_interface *interface = port->priv;
702 struct bnx2fc_hba *hba = interface->hba;
695 struct net_device *dev = hba->phys_dev; 703 struct net_device *dev = hba->phys_dev;
696 int rc = 0; 704 int rc = 0;
697 705
@@ -713,7 +721,7 @@ static int bnx2fc_link_ok(struct fc_lport *lport)
713 */ 721 */
714void bnx2fc_get_link_state(struct bnx2fc_hba *hba) 722void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
715{ 723{
716 if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) 724 if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state))
717 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); 725 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
718 else 726 else
719 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); 727 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
@@ -722,11 +730,13 @@ void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
722static int bnx2fc_net_config(struct fc_lport *lport) 730static int bnx2fc_net_config(struct fc_lport *lport)
723{ 731{
724 struct bnx2fc_hba *hba; 732 struct bnx2fc_hba *hba;
733 struct bnx2fc_interface *interface;
725 struct fcoe_port *port; 734 struct fcoe_port *port;
726 u64 wwnn, wwpn; 735 u64 wwnn, wwpn;
727 736
728 port = lport_priv(lport); 737 port = lport_priv(lport);
729 hba = port->priv; 738 interface = port->priv;
739 hba = interface->hba;
730 740
731 /* require support for get_pauseparam ethtool op. */ 741 /* require support for get_pauseparam ethtool op. */
732 if (!hba->phys_dev->ethtool_ops || 742 if (!hba->phys_dev->ethtool_ops ||
@@ -743,11 +753,11 @@ static int bnx2fc_net_config(struct fc_lport *lport)
743 bnx2fc_link_speed_update(lport); 753 bnx2fc_link_speed_update(lport);
744 754
745 if (!lport->vport) { 755 if (!lport->vport) {
746 wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0); 756 wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0);
747 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); 757 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
748 fc_set_wwnn(lport, wwnn); 758 fc_set_wwnn(lport, wwnn);
749 759
750 wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0); 760 wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0);
751 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); 761 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
752 fc_set_wwpn(lport, wwpn); 762 fc_set_wwpn(lport, wwpn);
753 } 763 }
@@ -759,9 +769,9 @@ static void bnx2fc_destroy_timer(unsigned long data)
759{ 769{
760 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; 770 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
761 771
762 BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - " 772 BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - "
763 "Destroy compl not received!!\n"); 773 "Destroy compl not received!!\n");
764 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; 774 set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
765 wake_up_interruptible(&hba->destroy_wait); 775 wake_up_interruptible(&hba->destroy_wait);
766} 776}
767 777
@@ -779,54 +789,35 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
779 u16 vlan_id) 789 u16 vlan_id)
780{ 790{
781 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; 791 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
782 struct fc_lport *lport = hba->ctlr.lp; 792 struct fc_lport *lport;
783 struct fc_lport *vport; 793 struct fc_lport *vport;
794 struct bnx2fc_interface *interface;
795 int wait_for_upload = 0;
784 u32 link_possible = 1; 796 u32 link_possible = 1;
785 797
786 /* Ignore vlans for now */ 798 /* Ignore vlans for now */
787 if (vlan_id != 0) 799 if (vlan_id != 0)
788 return; 800 return;
789 801
790 if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
791 BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
792 hba->netdev->name, event);
793 return;
794 }
795
796 /*
797 * ASSUMPTION:
798 * indicate_netevent cannot be called from cnic unless bnx2fc
799 * does register_device
800 */
801 BUG_ON(!lport);
802
803 BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
804 hba->netdev->name, event);
805
806 switch (event) { 802 switch (event) {
807 case NETDEV_UP: 803 case NETDEV_UP:
808 BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
809 hba->adapter_state);
810 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) 804 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
811 printk(KERN_ERR "indicate_netevent: "\ 805 printk(KERN_ERR "indicate_netevent: "\
812 "adapter is not UP!!\n"); 806 "hba is not UP!!\n");
813 break; 807 break;
814 808
815 case NETDEV_DOWN: 809 case NETDEV_DOWN:
816 BNX2FC_HBA_DBG(lport, "Port down\n");
817 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); 810 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
818 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); 811 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
819 link_possible = 0; 812 link_possible = 0;
820 break; 813 break;
821 814
822 case NETDEV_GOING_DOWN: 815 case NETDEV_GOING_DOWN:
823 BNX2FC_HBA_DBG(lport, "Port going down\n");
824 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); 816 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
825 link_possible = 0; 817 link_possible = 0;
826 break; 818 break;
827 819
828 case NETDEV_CHANGE: 820 case NETDEV_CHANGE:
829 BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
830 break; 821 break;
831 822
832 default: 823 default:
@@ -834,15 +825,22 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
834 return; 825 return;
835 } 826 }
836 827
837 bnx2fc_link_speed_update(lport); 828 mutex_lock(&bnx2fc_dev_lock);
829 list_for_each_entry(interface, &if_list, list) {
838 830
839 if (link_possible && !bnx2fc_link_ok(lport)) { 831 if (interface->hba != hba)
840 printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n"); 832 continue;
841 fcoe_ctlr_link_up(&hba->ctlr); 833
842 } else { 834 lport = interface->ctlr.lp;
843 printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n"); 835 BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
844 if (fcoe_ctlr_link_down(&hba->ctlr)) { 836 interface->netdev->name, event);
845 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); 837
838 bnx2fc_link_speed_update(lport);
839
840 if (link_possible && !bnx2fc_link_ok(lport)) {
841 printk(KERN_ERR "indicate_netevent: ctlr_link_up\n");
842 fcoe_ctlr_link_up(&interface->ctlr);
843 } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
846 mutex_lock(&lport->lp_mutex); 844 mutex_lock(&lport->lp_mutex);
847 list_for_each_entry(vport, &lport->vports, list) 845 list_for_each_entry(vport, &lport->vports, list)
848 fc_host_port_type(vport->host) = 846 fc_host_port_type(vport->host) =
@@ -853,24 +851,26 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
853 get_cpu())->LinkFailureCount++; 851 get_cpu())->LinkFailureCount++;
854 put_cpu(); 852 put_cpu();
855 fcoe_clean_pending_queue(lport); 853 fcoe_clean_pending_queue(lport);
854 wait_for_upload = 1;
855 }
856 }
857 mutex_unlock(&bnx2fc_dev_lock);
856 858
857 init_waitqueue_head(&hba->shutdown_wait); 859 if (wait_for_upload) {
858 BNX2FC_HBA_DBG(lport, "indicate_netevent " 860 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
859 "num_ofld_sess = %d\n", 861 init_waitqueue_head(&hba->shutdown_wait);
860 hba->num_ofld_sess); 862 BNX2FC_MISC_DBG("indicate_netevent "
861 hba->wait_for_link_down = 1; 863 "num_ofld_sess = %d\n",
862 BNX2FC_HBA_DBG(lport, "waiting for uploads to " 864 hba->num_ofld_sess);
863 "compl proc = %s\n", 865 hba->wait_for_link_down = 1;
864 current->comm); 866 wait_event_interruptible(hba->shutdown_wait,
865 wait_event_interruptible(hba->shutdown_wait, 867 (hba->num_ofld_sess == 0));
866 (hba->num_ofld_sess == 0)); 868 BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n",
867 BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
868 hba->num_ofld_sess); 869 hba->num_ofld_sess);
869 hba->wait_for_link_down = 0; 870 hba->wait_for_link_down = 0;
870 871
871 if (signal_pending(current)) 872 if (signal_pending(current))
872 flush_signals(current); 873 flush_signals(current);
873 }
874 } 874 }
875} 875}
876 876
@@ -889,23 +889,12 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
889 889
890static int bnx2fc_em_config(struct fc_lport *lport) 890static int bnx2fc_em_config(struct fc_lport *lport)
891{ 891{
892 struct fcoe_port *port = lport_priv(lport);
893 struct bnx2fc_hba *hba = port->priv;
894
895 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID, 892 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
896 FCOE_MAX_XID, NULL)) { 893 FCOE_MAX_XID, NULL)) {
897 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); 894 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
898 return -ENOMEM; 895 return -ENOMEM;
899 } 896 }
900 897
901 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
902 BNX2FC_MAX_XID);
903
904 if (!hba->cmd_mgr) {
905 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
906 fc_exch_mgr_free(lport);
907 return -ENOMEM;
908 }
909 return 0; 898 return 0;
910} 899}
911 900
@@ -918,11 +907,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport)
918 lport->e_d_tov = 2 * 1000; 907 lport->e_d_tov = 2 * 1000;
919 lport->r_a_tov = 10 * 1000; 908 lport->r_a_tov = 10 * 1000;
920 909
921 /* REVISIT: enable when supporting tape devices
922 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 910 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
923 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); 911 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
924 */
925 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
926 lport->does_npiv = 1; 912 lport->does_npiv = 1;
927 913
928 memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); 914 memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
@@ -952,9 +938,10 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
952 struct packet_type *ptype, 938 struct packet_type *ptype,
953 struct net_device *orig_dev) 939 struct net_device *orig_dev)
954{ 940{
955 struct bnx2fc_hba *hba; 941 struct bnx2fc_interface *interface;
956 hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type); 942 interface = container_of(ptype, struct bnx2fc_interface,
957 fcoe_ctlr_recv(&hba->ctlr, skb); 943 fip_packet_type);
944 fcoe_ctlr_recv(&interface->ctlr, skb);
958 return 0; 945 return 0;
959} 946}
960 947
@@ -1005,17 +992,17 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
1005 struct Scsi_Host *shost = vport_to_shost(vport); 992 struct Scsi_Host *shost = vport_to_shost(vport);
1006 struct fc_lport *n_port = shost_priv(shost); 993 struct fc_lport *n_port = shost_priv(shost);
1007 struct fcoe_port *port = lport_priv(n_port); 994 struct fcoe_port *port = lport_priv(n_port);
1008 struct bnx2fc_hba *hba = port->priv; 995 struct bnx2fc_interface *interface = port->priv;
1009 struct net_device *netdev = hba->netdev; 996 struct net_device *netdev = interface->netdev;
1010 struct fc_lport *vn_port; 997 struct fc_lport *vn_port;
1011 998
1012 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 999 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
1013 printk(KERN_ERR PFX "vn ports cannot be created on" 1000 printk(KERN_ERR PFX "vn ports cannot be created on"
1014 "this hba\n"); 1001 "this interface\n");
1015 return -EIO; 1002 return -EIO;
1016 } 1003 }
1017 mutex_lock(&bnx2fc_dev_lock); 1004 mutex_lock(&bnx2fc_dev_lock);
1018 vn_port = bnx2fc_if_create(hba, &vport->dev, 1); 1005 vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
1019 mutex_unlock(&bnx2fc_dev_lock); 1006 mutex_unlock(&bnx2fc_dev_lock);
1020 1007
1021 if (IS_ERR(vn_port)) { 1008 if (IS_ERR(vn_port)) {
@@ -1065,10 +1052,10 @@ static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
1065} 1052}
1066 1053
1067 1054
1068static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) 1055static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface)
1069{ 1056{
1070 struct net_device *netdev = hba->netdev; 1057 struct net_device *netdev = interface->netdev;
1071 struct net_device *physdev = hba->phys_dev; 1058 struct net_device *physdev = interface->hba->phys_dev;
1072 struct netdev_hw_addr *ha; 1059 struct netdev_hw_addr *ha;
1073 int sel_san_mac = 0; 1060 int sel_san_mac = 0;
1074 1061
@@ -1083,7 +1070,8 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
1083 1070
1084 if ((ha->type == NETDEV_HW_ADDR_T_SAN) && 1071 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
1085 (is_valid_ether_addr(ha->addr))) { 1072 (is_valid_ether_addr(ha->addr))) {
1086 memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); 1073 memcpy(interface->ctlr.ctl_src_addr, ha->addr,
1074 ETH_ALEN);
1087 sel_san_mac = 1; 1075 sel_san_mac = 1;
1088 BNX2FC_MISC_DBG("Found SAN MAC\n"); 1076 BNX2FC_MISC_DBG("Found SAN MAC\n");
1089 } 1077 }
@@ -1093,15 +1081,15 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
1093 if (!sel_san_mac) 1081 if (!sel_san_mac)
1094 return -ENODEV; 1082 return -ENODEV;
1095 1083
1096 hba->fip_packet_type.func = bnx2fc_fip_recv; 1084 interface->fip_packet_type.func = bnx2fc_fip_recv;
1097 hba->fip_packet_type.type = htons(ETH_P_FIP); 1085 interface->fip_packet_type.type = htons(ETH_P_FIP);
1098 hba->fip_packet_type.dev = netdev; 1086 interface->fip_packet_type.dev = netdev;
1099 dev_add_pack(&hba->fip_packet_type); 1087 dev_add_pack(&interface->fip_packet_type);
1100 1088
1101 hba->fcoe_packet_type.func = bnx2fc_rcv; 1089 interface->fcoe_packet_type.func = bnx2fc_rcv;
1102 hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); 1090 interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
1103 hba->fcoe_packet_type.dev = netdev; 1091 interface->fcoe_packet_type.dev = netdev;
1104 dev_add_pack(&hba->fcoe_packet_type); 1092 dev_add_pack(&interface->fcoe_packet_type);
1105 1093
1106 return 0; 1094 return 0;
1107} 1095}
@@ -1137,53 +1125,54 @@ static void bnx2fc_release_transport(void)
1137 1125
1138static void bnx2fc_interface_release(struct kref *kref) 1126static void bnx2fc_interface_release(struct kref *kref)
1139{ 1127{
1140 struct bnx2fc_hba *hba; 1128 struct bnx2fc_interface *interface;
1141 struct net_device *netdev; 1129 struct net_device *netdev;
1142 struct net_device *phys_dev;
1143 1130
1144 hba = container_of(kref, struct bnx2fc_hba, kref); 1131 interface = container_of(kref, struct bnx2fc_interface, kref);
1145 BNX2FC_MISC_DBG("Interface is being released\n"); 1132 BNX2FC_MISC_DBG("Interface is being released\n");
1146 1133
1147 netdev = hba->netdev; 1134 netdev = interface->netdev;
1148 phys_dev = hba->phys_dev;
1149 1135
1150 /* tear-down FIP controller */ 1136 /* tear-down FIP controller */
1151 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done)) 1137 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
1152 fcoe_ctlr_destroy(&hba->ctlr); 1138 fcoe_ctlr_destroy(&interface->ctlr);
1139
1140 kfree(interface);
1153 1141
1154 /* Free the command manager */
1155 if (hba->cmd_mgr) {
1156 bnx2fc_cmd_mgr_free(hba->cmd_mgr);
1157 hba->cmd_mgr = NULL;
1158 }
1159 dev_put(netdev); 1142 dev_put(netdev);
1160 module_put(THIS_MODULE); 1143 module_put(THIS_MODULE);
1161} 1144}
1162 1145
1163static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba) 1146static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface)
1164{ 1147{
1165 kref_get(&hba->kref); 1148 kref_get(&interface->kref);
1166} 1149}
1167 1150
1168static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba) 1151static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface)
1169{ 1152{
1170 kref_put(&hba->kref, bnx2fc_interface_release); 1153 kref_put(&interface->kref, bnx2fc_interface_release);
1171} 1154}
1172static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba) 1155static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
1173{ 1156{
1157 /* Free the command manager */
1158 if (hba->cmd_mgr) {
1159 bnx2fc_cmd_mgr_free(hba->cmd_mgr);
1160 hba->cmd_mgr = NULL;
1161 }
1162 kfree(hba->tgt_ofld_list);
1174 bnx2fc_unbind_pcidev(hba); 1163 bnx2fc_unbind_pcidev(hba);
1175 kfree(hba); 1164 kfree(hba);
1176} 1165}
1177 1166
1178/** 1167/**
1179 * bnx2fc_interface_create - create a new fcoe instance 1168 * bnx2fc_hba_create - create a new bnx2fc hba
1180 * 1169 *
1181 * @cnic: pointer to cnic device 1170 * @cnic: pointer to cnic device
1182 * 1171 *
1183 * Creates a new FCoE instance on the given device which include allocating 1172 * Creates a new FCoE hba on the given device.
1184 * hba structure, scsi_host and lport structures. 1173 *
1185 */ 1174 */
1186static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic) 1175static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1187{ 1176{
1188 struct bnx2fc_hba *hba; 1177 struct bnx2fc_hba *hba;
1189 int rc; 1178 int rc;
@@ -1198,65 +1187,83 @@ static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
1198 1187
1199 hba->cnic = cnic; 1188 hba->cnic = cnic;
1200 rc = bnx2fc_bind_pcidev(hba); 1189 rc = bnx2fc_bind_pcidev(hba);
1201 if (rc) 1190 if (rc) {
1191 printk(KERN_ERR PFX "create_adapter: bind error\n");
1202 goto bind_err; 1192 goto bind_err;
1193 }
1203 hba->phys_dev = cnic->netdev; 1194 hba->phys_dev = cnic->netdev;
1204 /* will get overwritten after we do vlan discovery */ 1195 hba->next_conn_id = 0;
1205 hba->netdev = hba->phys_dev; 1196
1197 hba->tgt_ofld_list =
1198 kzalloc(sizeof(struct bnx2fc_rport *) * BNX2FC_NUM_MAX_SESS,
1199 GFP_KERNEL);
1200 if (!hba->tgt_ofld_list) {
1201 printk(KERN_ERR PFX "Unable to allocate tgt offload list\n");
1202 goto tgtofld_err;
1203 }
1204
1205 hba->num_ofld_sess = 0;
1206
1207 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
1208 BNX2FC_MAX_XID);
1209 if (!hba->cmd_mgr) {
1210 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
1211 goto cmgr_err;
1212 }
1206 1213
1207 init_waitqueue_head(&hba->shutdown_wait); 1214 init_waitqueue_head(&hba->shutdown_wait);
1208 init_waitqueue_head(&hba->destroy_wait); 1215 init_waitqueue_head(&hba->destroy_wait);
1216 INIT_LIST_HEAD(&hba->vports);
1209 1217
1210 return hba; 1218 return hba;
1219
1220cmgr_err:
1221 kfree(hba->tgt_ofld_list);
1222tgtofld_err:
1223 bnx2fc_unbind_pcidev(hba);
1211bind_err: 1224bind_err:
1212 printk(KERN_ERR PFX "create_interface: bind error\n");
1213 kfree(hba); 1225 kfree(hba);
1214 return NULL; 1226 return NULL;
1215} 1227}
1216 1228
1217static int bnx2fc_interface_setup(struct bnx2fc_hba *hba, 1229struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1218 enum fip_state fip_mode) 1230 struct net_device *netdev,
1231 enum fip_state fip_mode)
1219{ 1232{
1233 struct bnx2fc_interface *interface;
1220 int rc = 0; 1234 int rc = 0;
1221 struct net_device *netdev = hba->netdev;
1222 struct fcoe_ctlr *fip = &hba->ctlr;
1223 1235
1236 interface = kzalloc(sizeof(*interface), GFP_KERNEL);
1237 if (!interface) {
1238 printk(KERN_ERR PFX "Unable to allocate interface structure\n");
1239 return NULL;
1240 }
1224 dev_hold(netdev); 1241 dev_hold(netdev);
1225 kref_init(&hba->kref); 1242 kref_init(&interface->kref);
1226 1243 interface->hba = hba;
1227 hba->flags = 0; 1244 interface->netdev = netdev;
1228 1245
1229 /* Initialize FIP */ 1246 /* Initialize FIP */
1230 memset(fip, 0, sizeof(*fip)); 1247 fcoe_ctlr_init(&interface->ctlr, fip_mode);
1231 fcoe_ctlr_init(fip, fip_mode); 1248 interface->ctlr.send = bnx2fc_fip_send;
1232 hba->ctlr.send = bnx2fc_fip_send; 1249 interface->ctlr.update_mac = bnx2fc_update_src_mac;
1233 hba->ctlr.update_mac = bnx2fc_update_src_mac; 1250 interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
1234 hba->ctlr.get_src_addr = bnx2fc_get_src_mac; 1251 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
1235 set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
1236
1237 INIT_LIST_HEAD(&hba->vports);
1238 rc = bnx2fc_netdev_setup(hba);
1239 if (rc)
1240 goto setup_err;
1241 1252
1242 hba->next_conn_id = 0; 1253 rc = bnx2fc_netdev_setup(interface);
1254 if (!rc)
1255 return interface;
1243 1256
1244 memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list)); 1257 fcoe_ctlr_destroy(&interface->ctlr);
1245 hba->num_ofld_sess = 0;
1246
1247 return 0;
1248
1249setup_err:
1250 fcoe_ctlr_destroy(&hba->ctlr);
1251 dev_put(netdev); 1258 dev_put(netdev);
1252 bnx2fc_interface_put(hba); 1259 kfree(interface);
1253 return rc; 1260 return NULL;
1254} 1261}
1255 1262
1256/** 1263/**
1257 * bnx2fc_if_create - Create FCoE instance on a given interface 1264 * bnx2fc_if_create - Create FCoE instance on a given interface
1258 * 1265 *
1259 * @hba: FCoE interface to create a local port on 1266 * @interface: FCoE interface to create a local port on
1260 * @parent: Device pointer to be the parent in sysfs for the SCSI host 1267 * @parent: Device pointer to be the parent in sysfs for the SCSI host
1261 * @npiv: Indicates if the port is vport or not 1268 * @npiv: Indicates if the port is vport or not
1262 * 1269 *
@@ -1264,7 +1271,7 @@ setup_err:
1264 * 1271 *
1265 * Returns: Allocated fc_lport or an error pointer 1272 * Returns: Allocated fc_lport or an error pointer
1266 */ 1273 */
1267static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, 1274static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1268 struct device *parent, int npiv) 1275 struct device *parent, int npiv)
1269{ 1276{
1270 struct fc_lport *lport, *n_port; 1277 struct fc_lport *lport, *n_port;
@@ -1272,11 +1279,12 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1272 struct Scsi_Host *shost; 1279 struct Scsi_Host *shost;
1273 struct fc_vport *vport = dev_to_vport(parent); 1280 struct fc_vport *vport = dev_to_vport(parent);
1274 struct bnx2fc_lport *blport; 1281 struct bnx2fc_lport *blport;
1282 struct bnx2fc_hba *hba;
1275 int rc = 0; 1283 int rc = 0;
1276 1284
1277 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); 1285 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
1278 if (!blport) { 1286 if (!blport) {
1279 BNX2FC_HBA_DBG(hba->ctlr.lp, "Unable to alloc bnx2fc_lport\n"); 1287 BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
1280 return NULL; 1288 return NULL;
1281 } 1289 }
1282 1290
@@ -1293,7 +1301,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1293 shost = lport->host; 1301 shost = lport->host;
1294 port = lport_priv(lport); 1302 port = lport_priv(lport);
1295 port->lport = lport; 1303 port->lport = lport;
1296 port->priv = hba; 1304 port->priv = interface;
1297 INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); 1305 INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
1298 1306
1299 /* Configure fcoe_port */ 1307 /* Configure fcoe_port */
@@ -1317,7 +1325,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1317 rc = bnx2fc_shost_config(lport, parent); 1325 rc = bnx2fc_shost_config(lport, parent);
1318 if (rc) { 1326 if (rc) {
1319 printk(KERN_ERR PFX "Couldnt configure shost for %s\n", 1327 printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
1320 hba->netdev->name); 1328 interface->netdev->name);
1321 goto lp_config_err; 1329 goto lp_config_err;
1322 } 1330 }
1323 1331
@@ -1343,8 +1351,9 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1343 goto shost_err; 1351 goto shost_err;
1344 } 1352 }
1345 1353
1346 bnx2fc_interface_get(hba); 1354 bnx2fc_interface_get(interface);
1347 1355
1356 hba = interface->hba;
1348 spin_lock_bh(&hba->hba_lock); 1357 spin_lock_bh(&hba->hba_lock);
1349 blport->lport = lport; 1358 blport->lport = lport;
1350 list_add_tail(&blport->list, &hba->vports); 1359 list_add_tail(&blport->list, &hba->vports);
@@ -1361,21 +1370,19 @@ free_blport:
1361 return NULL; 1370 return NULL;
1362} 1371}
1363 1372
1364static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba) 1373static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface)
1365{ 1374{
1366 /* Dont listen for Ethernet packets anymore */ 1375 /* Dont listen for Ethernet packets anymore */
1367 __dev_remove_pack(&hba->fcoe_packet_type); 1376 __dev_remove_pack(&interface->fcoe_packet_type);
1368 __dev_remove_pack(&hba->fip_packet_type); 1377 __dev_remove_pack(&interface->fip_packet_type);
1369 synchronize_net(); 1378 synchronize_net();
1370} 1379}
1371 1380
1372static void bnx2fc_if_destroy(struct fc_lport *lport) 1381static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
1373{ 1382{
1374 struct fcoe_port *port = lport_priv(lport); 1383 struct fcoe_port *port = lport_priv(lport);
1375 struct bnx2fc_hba *hba = port->priv;
1376 struct bnx2fc_lport *blport, *tmp; 1384 struct bnx2fc_lport *blport, *tmp;
1377 1385
1378 BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
1379 /* Stop the transmit retry timer */ 1386 /* Stop the transmit retry timer */
1380 del_timer_sync(&port->timer); 1387 del_timer_sync(&port->timer);
1381 1388
@@ -1409,8 +1416,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1409 1416
1410 /* Release Scsi_Host */ 1417 /* Release Scsi_Host */
1411 scsi_host_put(lport->host); 1418 scsi_host_put(lport->host);
1412
1413 bnx2fc_interface_put(hba);
1414} 1419}
1415 1420
1416/** 1421/**
@@ -1425,46 +1430,31 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1425 */ 1430 */
1426static int bnx2fc_destroy(struct net_device *netdev) 1431static int bnx2fc_destroy(struct net_device *netdev)
1427{ 1432{
1428 struct bnx2fc_hba *hba = NULL; 1433 struct bnx2fc_interface *interface = NULL;
1429 struct net_device *phys_dev; 1434 struct bnx2fc_hba *hba;
1435 struct fc_lport *lport;
1430 int rc = 0; 1436 int rc = 0;
1431 1437
1432 rtnl_lock(); 1438 rtnl_lock();
1433
1434 mutex_lock(&bnx2fc_dev_lock); 1439 mutex_lock(&bnx2fc_dev_lock);
1435 /* obtain physical netdev */
1436 if (netdev->priv_flags & IFF_802_1Q_VLAN)
1437 phys_dev = vlan_dev_real_dev(netdev);
1438 else {
1439 printk(KERN_ERR PFX "Not a vlan device\n");
1440 rc = -ENODEV;
1441 goto netdev_err;
1442 }
1443 1440
1444 hba = bnx2fc_hba_lookup(phys_dev); 1441 interface = bnx2fc_interface_lookup(netdev);
1445 if (!hba || !hba->ctlr.lp) { 1442 if (!interface || !interface->ctlr.lp) {
1446 rc = -ENODEV; 1443 rc = -ENODEV;
1447 printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n"); 1444 printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
1448 goto netdev_err;
1449 }
1450
1451 if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1452 printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
1453 goto netdev_err; 1445 goto netdev_err;
1454 } 1446 }
1455 1447
1456 bnx2fc_netdev_cleanup(hba); 1448 hba = interface->hba;
1457
1458 bnx2fc_stop(hba);
1459
1460 bnx2fc_if_destroy(hba->ctlr.lp);
1461 1449
1462 destroy_workqueue(hba->timer_work_queue); 1450 bnx2fc_netdev_cleanup(interface);
1451 lport = interface->ctlr.lp;
1452 bnx2fc_stop(interface);
1453 list_del(&interface->list);
1454 destroy_workqueue(interface->timer_work_queue);
1455 bnx2fc_interface_put(interface);
1456 bnx2fc_if_destroy(lport, hba);
1463 1457
1464 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
1465 bnx2fc_fw_destroy(hba);
1466
1467 clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
1468netdev_err: 1458netdev_err:
1469 mutex_unlock(&bnx2fc_dev_lock); 1459 mutex_unlock(&bnx2fc_dev_lock);
1470 rtnl_unlock(); 1460 rtnl_unlock();
@@ -1475,16 +1465,20 @@ static void bnx2fc_destroy_work(struct work_struct *work)
1475{ 1465{
1476 struct fcoe_port *port; 1466 struct fcoe_port *port;
1477 struct fc_lport *lport; 1467 struct fc_lport *lport;
1468 struct bnx2fc_interface *interface;
1469 struct bnx2fc_hba *hba;
1478 1470
1479 port = container_of(work, struct fcoe_port, destroy_work); 1471 port = container_of(work, struct fcoe_port, destroy_work);
1480 lport = port->lport; 1472 lport = port->lport;
1473 interface = port->priv;
1474 hba = interface->hba;
1481 1475
1482 BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); 1476 BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
1483 1477
1484 bnx2fc_port_shutdown(lport); 1478 bnx2fc_port_shutdown(lport);
1485 rtnl_lock(); 1479 rtnl_lock();
1486 mutex_lock(&bnx2fc_dev_lock); 1480 mutex_lock(&bnx2fc_dev_lock);
1487 bnx2fc_if_destroy(lport); 1481 bnx2fc_if_destroy(lport, hba);
1488 mutex_unlock(&bnx2fc_dev_lock); 1482 mutex_unlock(&bnx2fc_dev_lock);
1489 rtnl_unlock(); 1483 rtnl_unlock();
1490} 1484}
@@ -1556,28 +1550,27 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
1556static void bnx2fc_ulp_start(void *handle) 1550static void bnx2fc_ulp_start(void *handle)
1557{ 1551{
1558 struct bnx2fc_hba *hba = handle; 1552 struct bnx2fc_hba *hba = handle;
1559 struct fc_lport *lport = hba->ctlr.lp; 1553 struct bnx2fc_interface *interface;
1554 struct fc_lport *lport;
1560 1555
1561 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1562 mutex_lock(&bnx2fc_dev_lock); 1556 mutex_lock(&bnx2fc_dev_lock);
1563 1557
1564 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) 1558 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
1565 goto start_disc;
1566
1567 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
1568 bnx2fc_fw_init(hba); 1559 bnx2fc_fw_init(hba);
1569 1560
1570start_disc:
1571 mutex_unlock(&bnx2fc_dev_lock);
1572
1573 BNX2FC_MISC_DBG("bnx2fc started.\n"); 1561 BNX2FC_MISC_DBG("bnx2fc started.\n");
1574 1562
1575 /* Kick off Fabric discovery*/ 1563 list_for_each_entry(interface, &if_list, list) {
1576 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { 1564 if (interface->hba == hba) {
1577 printk(KERN_ERR PFX "ulp_init: start discovery\n"); 1565 lport = interface->ctlr.lp;
1578 lport->tt.frame_send = bnx2fc_xmit; 1566 /* Kick off Fabric discovery*/
1579 bnx2fc_start_disc(hba); 1567 printk(KERN_ERR PFX "ulp_init: start discovery\n");
1568 lport->tt.frame_send = bnx2fc_xmit;
1569 bnx2fc_start_disc(interface);
1570 }
1580 } 1571 }
1572
1573 mutex_unlock(&bnx2fc_dev_lock);
1581} 1574}
1582 1575
1583static void bnx2fc_port_shutdown(struct fc_lport *lport) 1576static void bnx2fc_port_shutdown(struct fc_lport *lport)
@@ -1587,37 +1580,25 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
1587 fc_lport_destroy(lport); 1580 fc_lport_destroy(lport);
1588} 1581}
1589 1582
1590static void bnx2fc_stop(struct bnx2fc_hba *hba) 1583static void bnx2fc_stop(struct bnx2fc_interface *interface)
1591{ 1584{
1592 struct fc_lport *lport; 1585 struct fc_lport *lport;
1593 struct fc_lport *vport; 1586 struct fc_lport *vport;
1594 1587
1595 BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__, 1588 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
1596 hba->init_done); 1589 return;
1597 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
1598 test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1599 lport = hba->ctlr.lp;
1600 bnx2fc_port_shutdown(lport);
1601 BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
1602 "offloaded sessions\n",
1603 hba->num_ofld_sess);
1604 wait_event_interruptible(hba->shutdown_wait,
1605 (hba->num_ofld_sess == 0));
1606 mutex_lock(&lport->lp_mutex);
1607 list_for_each_entry(vport, &lport->vports, list)
1608 fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
1609 mutex_unlock(&lport->lp_mutex);
1610 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1611 fcoe_ctlr_link_down(&hba->ctlr);
1612 fcoe_clean_pending_queue(lport);
1613
1614 mutex_lock(&hba->hba_mutex);
1615 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1616 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
1617 1590
1618 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); 1591 lport = interface->ctlr.lp;
1619 mutex_unlock(&hba->hba_mutex); 1592 bnx2fc_port_shutdown(lport);
1620 } 1593
1594 mutex_lock(&lport->lp_mutex);
1595 list_for_each_entry(vport, &lport->vports, list)
1596 fc_host_port_type(vport->host) =
1597 FC_PORTTYPE_UNKNOWN;
1598 mutex_unlock(&lport->lp_mutex);
1599 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1600 fcoe_ctlr_link_down(&interface->ctlr);
1601 fcoe_clean_pending_queue(lport);
1621} 1602}
1622 1603
1623static int bnx2fc_fw_init(struct bnx2fc_hba *hba) 1604static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
@@ -1656,8 +1637,7 @@ static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
1656 } 1637 }
1657 1638
1658 1639
1659 /* Mark HBA to indicate that the FW INIT is done */ 1640 set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags);
1660 set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
1661 return 0; 1641 return 0;
1662 1642
1663err_unbind: 1643err_unbind:
@@ -1668,7 +1648,7 @@ err_out:
1668 1648
1669static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) 1649static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1670{ 1650{
1671 if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 1651 if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) {
1672 if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { 1652 if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
1673 init_timer(&hba->destroy_timer); 1653 init_timer(&hba->destroy_timer);
1674 hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + 1654 hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
@@ -1677,8 +1657,8 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1677 hba->destroy_timer.data = (unsigned long)hba; 1657 hba->destroy_timer.data = (unsigned long)hba;
1678 add_timer(&hba->destroy_timer); 1658 add_timer(&hba->destroy_timer);
1679 wait_event_interruptible(hba->destroy_wait, 1659 wait_event_interruptible(hba->destroy_wait,
1680 (hba->flags & 1660 test_bit(BNX2FC_FLAG_DESTROY_CMPL,
1681 BNX2FC_FLAG_DESTROY_CMPL)); 1661 &hba->flags));
1682 /* This should never happen */ 1662 /* This should never happen */
1683 if (signal_pending(current)) 1663 if (signal_pending(current))
1684 flush_signals(current); 1664 flush_signals(current);
@@ -1699,40 +1679,57 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1699 */ 1679 */
1700static void bnx2fc_ulp_stop(void *handle) 1680static void bnx2fc_ulp_stop(void *handle)
1701{ 1681{
1702 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle; 1682 struct bnx2fc_hba *hba = handle;
1683 struct bnx2fc_interface *interface;
1703 1684
1704 printk(KERN_ERR "ULP_STOP\n"); 1685 printk(KERN_ERR "ULP_STOP\n");
1705 1686
1706 mutex_lock(&bnx2fc_dev_lock); 1687 mutex_lock(&bnx2fc_dev_lock);
1707 bnx2fc_stop(hba); 1688 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
1689 goto exit;
1690 list_for_each_entry(interface, &if_list, list) {
1691 if (interface->hba == hba)
1692 bnx2fc_stop(interface);
1693 }
1694 BUG_ON(hba->num_ofld_sess != 0);
1695
1696 mutex_lock(&hba->hba_mutex);
1697 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1698 clear_bit(ADAPTER_STATE_GOING_DOWN,
1699 &hba->adapter_state);
1700
1701 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
1702 mutex_unlock(&hba->hba_mutex);
1703
1708 bnx2fc_fw_destroy(hba); 1704 bnx2fc_fw_destroy(hba);
1705exit:
1709 mutex_unlock(&bnx2fc_dev_lock); 1706 mutex_unlock(&bnx2fc_dev_lock);
1710} 1707}
1711 1708
1712static void bnx2fc_start_disc(struct bnx2fc_hba *hba) 1709static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1713{ 1710{
1714 struct fc_lport *lport; 1711 struct fc_lport *lport;
1715 int wait_cnt = 0; 1712 int wait_cnt = 0;
1716 1713
1717 BNX2FC_MISC_DBG("Entered %s\n", __func__); 1714 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1718 /* Kick off FIP/FLOGI */ 1715 /* Kick off FIP/FLOGI */
1719 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 1716 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
1720 printk(KERN_ERR PFX "Init not done yet\n"); 1717 printk(KERN_ERR PFX "Init not done yet\n");
1721 return; 1718 return;
1722 } 1719 }
1723 1720
1724 lport = hba->ctlr.lp; 1721 lport = interface->ctlr.lp;
1725 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); 1722 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
1726 1723
1727 if (!bnx2fc_link_ok(lport)) { 1724 if (!bnx2fc_link_ok(lport)) {
1728 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); 1725 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
1729 fcoe_ctlr_link_up(&hba->ctlr); 1726 fcoe_ctlr_link_up(&interface->ctlr);
1730 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 1727 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1731 set_bit(ADAPTER_STATE_READY, &hba->adapter_state); 1728 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
1732 } 1729 }
1733 1730
1734 /* wait for the FCF to be selected before issuing FLOGI */ 1731 /* wait for the FCF to be selected before issuing FLOGI */
1735 while (!hba->ctlr.sel_fcf) { 1732 while (!interface->ctlr.sel_fcf) {
1736 msleep(250); 1733 msleep(250);
1737 /* give up after 3 secs */ 1734 /* give up after 3 secs */
1738 if (++wait_cnt > 12) 1735 if (++wait_cnt > 12)
@@ -1758,15 +1755,15 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1758 1755
1759 BNX2FC_MISC_DBG("Entered %s\n", __func__); 1756 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1760 /* bnx2fc works only when bnx2x is loaded */ 1757 /* bnx2fc works only when bnx2x is loaded */
1761 if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 1758 if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) ||
1759 (dev->max_fcoe_conn == 0)) {
1762 printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," 1760 printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
1763 " flags: %lx\n", 1761 " flags: %lx fcoe_conn: %d\n",
1764 dev->netdev->name, dev->flags); 1762 dev->netdev->name, dev->flags, dev->max_fcoe_conn);
1765 return; 1763 return;
1766 } 1764 }
1767 1765
1768 /* Configure FCoE interface */ 1766 hba = bnx2fc_hba_create(dev);
1769 hba = bnx2fc_interface_create(dev);
1770 if (!hba) { 1767 if (!hba) {
1771 printk(KERN_ERR PFX "hba initialization failed\n"); 1768 printk(KERN_ERR PFX "hba initialization failed\n");
1772 return; 1769 return;
@@ -1774,7 +1771,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1774 1771
1775 /* Add HBA to the adapter list */ 1772 /* Add HBA to the adapter list */
1776 mutex_lock(&bnx2fc_dev_lock); 1773 mutex_lock(&bnx2fc_dev_lock);
1777 list_add_tail(&hba->link, &adapter_list); 1774 list_add_tail(&hba->list, &adapter_list);
1778 adapter_count++; 1775 adapter_count++;
1779 mutex_unlock(&bnx2fc_dev_lock); 1776 mutex_unlock(&bnx2fc_dev_lock);
1780 1777
@@ -1782,7 +1779,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1782 rc = dev->register_device(dev, CNIC_ULP_FCOE, 1779 rc = dev->register_device(dev, CNIC_ULP_FCOE,
1783 (void *) hba); 1780 (void *) hba);
1784 if (rc) 1781 if (rc)
1785 printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc); 1782 printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc);
1786 else 1783 else
1787 set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); 1784 set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
1788} 1785}
@@ -1790,52 +1787,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1790 1787
1791static int bnx2fc_disable(struct net_device *netdev) 1788static int bnx2fc_disable(struct net_device *netdev)
1792{ 1789{
1793 struct bnx2fc_hba *hba; 1790 struct bnx2fc_interface *interface;
1794 struct net_device *phys_dev;
1795 struct ethtool_drvinfo drvinfo;
1796 int rc = 0; 1791 int rc = 0;
1797 1792
1798 rtnl_lock(); 1793 rtnl_lock();
1799
1800 mutex_lock(&bnx2fc_dev_lock); 1794 mutex_lock(&bnx2fc_dev_lock);
1801 1795
1802 /* obtain physical netdev */ 1796 interface = bnx2fc_interface_lookup(netdev);
1803 if (netdev->priv_flags & IFF_802_1Q_VLAN) 1797 if (!interface || !interface->ctlr.lp) {
1804 phys_dev = vlan_dev_real_dev(netdev);
1805 else {
1806 printk(KERN_ERR PFX "Not a vlan device\n");
1807 rc = -ENODEV;
1808 goto nodev;
1809 }
1810
1811 /* verify if the physical device is a netxtreme2 device */
1812 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1813 memset(&drvinfo, 0, sizeof(drvinfo));
1814 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1815 if (strcmp(drvinfo.driver, "bnx2x")) {
1816 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1817 rc = -ENODEV;
1818 goto nodev;
1819 }
1820 } else {
1821 printk(KERN_ERR PFX "unable to obtain drv_info\n");
1822 rc = -ENODEV;
1823 goto nodev;
1824 }
1825
1826 printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
1827
1828 /* obtain hba and initialize rest of the structure */
1829 hba = bnx2fc_hba_lookup(phys_dev);
1830 if (!hba || !hba->ctlr.lp) {
1831 rc = -ENODEV; 1798 rc = -ENODEV;
1832 printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n"); 1799 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
1833 } else { 1800 } else {
1834 fcoe_ctlr_link_down(&hba->ctlr); 1801 fcoe_ctlr_link_down(&interface->ctlr);
1835 fcoe_clean_pending_queue(hba->ctlr.lp); 1802 fcoe_clean_pending_queue(interface->ctlr.lp);
1836 } 1803 }
1837 1804
1838nodev:
1839 mutex_unlock(&bnx2fc_dev_lock); 1805 mutex_unlock(&bnx2fc_dev_lock);
1840 rtnl_unlock(); 1806 rtnl_unlock();
1841 return rc; 1807 return rc;
@@ -1844,48 +1810,19 @@ nodev:
1844 1810
1845static int bnx2fc_enable(struct net_device *netdev) 1811static int bnx2fc_enable(struct net_device *netdev)
1846{ 1812{
1847 struct bnx2fc_hba *hba; 1813 struct bnx2fc_interface *interface;
1848 struct net_device *phys_dev;
1849 struct ethtool_drvinfo drvinfo;
1850 int rc = 0; 1814 int rc = 0;
1851 1815
1852 rtnl_lock(); 1816 rtnl_lock();
1853
1854 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1855 mutex_lock(&bnx2fc_dev_lock); 1817 mutex_lock(&bnx2fc_dev_lock);
1856 1818
1857 /* obtain physical netdev */ 1819 interface = bnx2fc_interface_lookup(netdev);
1858 if (netdev->priv_flags & IFF_802_1Q_VLAN) 1820 if (!interface || !interface->ctlr.lp) {
1859 phys_dev = vlan_dev_real_dev(netdev);
1860 else {
1861 printk(KERN_ERR PFX "Not a vlan device\n");
1862 rc = -ENODEV;
1863 goto nodev;
1864 }
1865 /* verify if the physical device is a netxtreme2 device */
1866 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1867 memset(&drvinfo, 0, sizeof(drvinfo));
1868 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1869 if (strcmp(drvinfo.driver, "bnx2x")) {
1870 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1871 rc = -ENODEV;
1872 goto nodev;
1873 }
1874 } else {
1875 printk(KERN_ERR PFX "unable to obtain drv_info\n");
1876 rc = -ENODEV; 1821 rc = -ENODEV;
1877 goto nodev; 1822 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
1878 } 1823 } else if (!bnx2fc_link_ok(interface->ctlr.lp))
1879 1824 fcoe_ctlr_link_up(&interface->ctlr);
1880 /* obtain hba and initialize rest of the structure */
1881 hba = bnx2fc_hba_lookup(phys_dev);
1882 if (!hba || !hba->ctlr.lp) {
1883 rc = -ENODEV;
1884 printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
1885 } else if (!bnx2fc_link_ok(hba->ctlr.lp))
1886 fcoe_ctlr_link_up(&hba->ctlr);
1887 1825
1888nodev:
1889 mutex_unlock(&bnx2fc_dev_lock); 1826 mutex_unlock(&bnx2fc_dev_lock);
1890 rtnl_unlock(); 1827 rtnl_unlock();
1891 return rc; 1828 return rc;
@@ -1903,6 +1840,7 @@ nodev:
1903 */ 1840 */
1904static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) 1841static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1905{ 1842{
1843 struct bnx2fc_interface *interface;
1906 struct bnx2fc_hba *hba; 1844 struct bnx2fc_hba *hba;
1907 struct net_device *phys_dev; 1845 struct net_device *phys_dev;
1908 struct fc_lport *lport; 1846 struct fc_lport *lport;
@@ -1938,7 +1876,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1938 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { 1876 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1939 memset(&drvinfo, 0, sizeof(drvinfo)); 1877 memset(&drvinfo, 0, sizeof(drvinfo));
1940 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); 1878 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1941 if (strcmp(drvinfo.driver, "bnx2x")) { 1879 if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) {
1942 printk(KERN_ERR PFX "Not a netxtreme2 device\n"); 1880 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1943 rc = -EINVAL; 1881 rc = -EINVAL;
1944 goto netdev_err; 1882 goto netdev_err;
@@ -1949,7 +1887,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1949 goto netdev_err; 1887 goto netdev_err;
1950 } 1888 }
1951 1889
1952 /* obtain hba and initialize rest of the structure */ 1890 /* obtain interface and initialize rest of the structure */
1953 hba = bnx2fc_hba_lookup(phys_dev); 1891 hba = bnx2fc_hba_lookup(phys_dev);
1954 if (!hba) { 1892 if (!hba) {
1955 rc = -ENODEV; 1893 rc = -ENODEV;
@@ -1957,67 +1895,61 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1957 goto netdev_err; 1895 goto netdev_err;
1958 } 1896 }
1959 1897
1960 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 1898 if (bnx2fc_interface_lookup(netdev)) {
1961 rc = bnx2fc_fw_init(hba);
1962 if (rc)
1963 goto netdev_err;
1964 }
1965
1966 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1967 rc = -EEXIST; 1899 rc = -EEXIST;
1968 goto netdev_err; 1900 goto netdev_err;
1969 } 1901 }
1970 1902
1971 /* update netdev with vlan netdev */ 1903 interface = bnx2fc_interface_create(hba, netdev, fip_mode);
1972 hba->netdev = netdev; 1904 if (!interface) {
1973 hba->vlan_id = vlan_id; 1905 printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
1974 hba->vlan_enabled = 1;
1975
1976 rc = bnx2fc_interface_setup(hba, fip_mode);
1977 if (rc) {
1978 printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
1979 goto ifput_err; 1906 goto ifput_err;
1980 } 1907 }
1981 1908
1982 hba->timer_work_queue = 1909 interface->vlan_id = vlan_id;
1910 interface->vlan_enabled = 1;
1911
1912 interface->timer_work_queue =
1983 create_singlethread_workqueue("bnx2fc_timer_wq"); 1913 create_singlethread_workqueue("bnx2fc_timer_wq");
1984 if (!hba->timer_work_queue) { 1914 if (!interface->timer_work_queue) {
1985 printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); 1915 printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
1986 rc = -EINVAL; 1916 rc = -EINVAL;
1987 goto ifput_err; 1917 goto ifput_err;
1988 } 1918 }
1989 1919
1990 lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0); 1920 lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0);
1991 if (!lport) { 1921 if (!lport) {
1992 printk(KERN_ERR PFX "Failed to create interface (%s)\n", 1922 printk(KERN_ERR PFX "Failed to create interface (%s)\n",
1993 netdev->name); 1923 netdev->name);
1994 bnx2fc_netdev_cleanup(hba); 1924 bnx2fc_netdev_cleanup(interface);
1995 rc = -EINVAL; 1925 rc = -EINVAL;
1996 goto if_create_err; 1926 goto if_create_err;
1997 } 1927 }
1998 1928
1929 /* Add interface to if_list */
1930 list_add_tail(&interface->list, &if_list);
1931
1999 lport->boot_time = jiffies; 1932 lport->boot_time = jiffies;
2000 1933
2001 /* Make this master N_port */ 1934 /* Make this master N_port */
2002 hba->ctlr.lp = lport; 1935 interface->ctlr.lp = lport;
2003 1936
2004 set_bit(BNX2FC_CREATE_DONE, &hba->init_done); 1937 BNX2FC_HBA_DBG(lport, "create: START DISC\n");
2005 printk(KERN_ERR PFX "create: START DISC\n"); 1938 bnx2fc_start_disc(interface);
2006 bnx2fc_start_disc(hba);
2007 /* 1939 /*
2008 * Release from kref_init in bnx2fc_interface_setup, on success 1940 * Release from kref_init in bnx2fc_interface_setup, on success
2009 * lport should be holding a reference taken in bnx2fc_if_create 1941 * lport should be holding a reference taken in bnx2fc_if_create
2010 */ 1942 */
2011 bnx2fc_interface_put(hba); 1943 bnx2fc_interface_put(interface);
2012 /* put netdev that was held while calling dev_get_by_name */ 1944 /* put netdev that was held while calling dev_get_by_name */
2013 mutex_unlock(&bnx2fc_dev_lock); 1945 mutex_unlock(&bnx2fc_dev_lock);
2014 rtnl_unlock(); 1946 rtnl_unlock();
2015 return 0; 1947 return 0;
2016 1948
2017if_create_err: 1949if_create_err:
2018 destroy_workqueue(hba->timer_work_queue); 1950 destroy_workqueue(interface->timer_work_queue);
2019ifput_err: 1951ifput_err:
2020 bnx2fc_interface_put(hba); 1952 bnx2fc_interface_put(interface);
2021netdev_err: 1953netdev_err:
2022 module_put(THIS_MODULE); 1954 module_put(THIS_MODULE);
2023mod_err: 1955mod_err:
@@ -2027,7 +1959,7 @@ mod_err:
2027} 1959}
2028 1960
2029/** 1961/**
2030 * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance 1962 * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
2031 * 1963 *
2032 * @cnic: Pointer to cnic device instance 1964 * @cnic: Pointer to cnic device instance
2033 * 1965 *
@@ -2047,19 +1979,30 @@ static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
2047 return NULL; 1979 return NULL;
2048} 1980}
2049 1981
2050static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev) 1982static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
1983 *netdev)
1984{
1985 struct bnx2fc_interface *interface;
1986
1987 /* Called with bnx2fc_dev_lock held */
1988 list_for_each_entry(interface, &if_list, list) {
1989 if (interface->netdev == netdev)
1990 return interface;
1991 }
1992 return NULL;
1993}
1994
1995static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device
1996 *phys_dev)
2051{ 1997{
2052 struct list_head *list;
2053 struct list_head *temp;
2054 struct bnx2fc_hba *hba; 1998 struct bnx2fc_hba *hba;
2055 1999
2056 /* Called with bnx2fc_dev_lock held */ 2000 /* Called with bnx2fc_dev_lock held */
2057 list_for_each_safe(list, temp, &adapter_list) { 2001 list_for_each_entry(hba, &adapter_list, list) {
2058 hba = (struct bnx2fc_hba *)list;
2059 if (hba->phys_dev == phys_dev) 2002 if (hba->phys_dev == phys_dev)
2060 return hba; 2003 return hba;
2061 } 2004 }
2062 printk(KERN_ERR PFX "hba_lookup: hba NULL\n"); 2005 printk(KERN_ERR PFX "adapter_lookup: hba NULL\n");
2063 return NULL; 2006 return NULL;
2064} 2007}
2065 2008
@@ -2071,6 +2014,8 @@ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
2071static void bnx2fc_ulp_exit(struct cnic_dev *dev) 2014static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2072{ 2015{
2073 struct bnx2fc_hba *hba; 2016 struct bnx2fc_hba *hba;
2017 struct bnx2fc_interface *interface, *tmp;
2018 struct fc_lport *lport;
2074 2019
2075 BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); 2020 BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
2076 2021
@@ -2089,13 +2034,20 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2089 return; 2034 return;
2090 } 2035 }
2091 2036
2092 list_del_init(&hba->link); 2037 list_del_init(&hba->list);
2093 adapter_count--; 2038 adapter_count--;
2094 2039
2095 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { 2040 list_for_each_entry_safe(interface, tmp, &if_list, list) {
2096 /* destroy not called yet, move to quiesced list */ 2041 /* destroy not called yet, move to quiesced list */
2097 bnx2fc_netdev_cleanup(hba); 2042 if (interface->hba == hba) {
2098 bnx2fc_if_destroy(hba->ctlr.lp); 2043 bnx2fc_netdev_cleanup(interface);
2044 bnx2fc_stop(interface);
2045
2046 list_del(&interface->list);
2047 lport = interface->ctlr.lp;
2048 bnx2fc_interface_put(interface);
2049 bnx2fc_if_destroy(lport, hba);
2050 }
2099 } 2051 }
2100 mutex_unlock(&bnx2fc_dev_lock); 2052 mutex_unlock(&bnx2fc_dev_lock);
2101 2053
@@ -2103,7 +2055,7 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2103 /* unregister cnic device */ 2055 /* unregister cnic device */
2104 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) 2056 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
2105 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); 2057 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
2106 bnx2fc_interface_destroy(hba); 2058 bnx2fc_hba_destroy(hba);
2107} 2059}
2108 2060
2109/** 2061/**
@@ -2259,6 +2211,7 @@ static int __init bnx2fc_mod_init(void)
2259 } 2211 }
2260 2212
2261 INIT_LIST_HEAD(&adapter_list); 2213 INIT_LIST_HEAD(&adapter_list);
2214 INIT_LIST_HEAD(&if_list);
2262 mutex_init(&bnx2fc_dev_lock); 2215 mutex_init(&bnx2fc_dev_lock);
2263 adapter_count = 0; 2216 adapter_count = 0;
2264 2217
@@ -2336,16 +2289,17 @@ static void __exit bnx2fc_mod_exit(void)
2336 mutex_unlock(&bnx2fc_dev_lock); 2289 mutex_unlock(&bnx2fc_dev_lock);
2337 2290
2338 /* Unregister with cnic */ 2291 /* Unregister with cnic */
2339 list_for_each_entry_safe(hba, next, &to_be_deleted, link) { 2292 list_for_each_entry_safe(hba, next, &to_be_deleted, list) {
2340 list_del_init(&hba->link); 2293 list_del_init(&hba->list);
2341 printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n", 2294 printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n",
2342 hba, atomic_read(&hba->kref.refcount)); 2295 hba);
2343 bnx2fc_ulp_stop(hba); 2296 bnx2fc_ulp_stop(hba);
2344 /* unregister cnic device */ 2297 /* unregister cnic device */
2345 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, 2298 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
2346 &hba->reg_with_cnic)) 2299 &hba->reg_with_cnic))
2347 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); 2300 hba->cnic->unregister_device(hba->cnic,
2348 bnx2fc_interface_destroy(hba); 2301 CNIC_ULP_FCOE);
2302 bnx2fc_hba_destroy(hba);
2349 } 2303 }
2350 cnic_unregister_driver(CNIC_ULP_FCOE); 2304 cnic_unregister_driver(CNIC_ULP_FCOE);
2351 2305
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 09bdd9b88d1a..72cfb14acd3a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -2,7 +2,7 @@
2 * This file contains the code that low level functions that interact 2 * This file contains the code that low level functions that interact
3 * with 57712 FCoE firmware. 3 * with 57712 FCoE firmware.
4 * 4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation 5 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -23,7 +23,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
23 struct fcoe_kcqe *ofld_kcqe); 23 struct fcoe_kcqe *ofld_kcqe);
24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); 24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, 25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
26 struct fcoe_kcqe *conn_destroy); 26 struct fcoe_kcqe *destroy_kcqe);
27 27
28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) 28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
29{ 29{
@@ -67,7 +67,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
67 int rc = 0; 67 int rc = 0;
68 68
69 if (!hba->cnic) { 69 if (!hba->cnic) {
70 printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n"); 70 printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
71 return -ENODEV; 71 return -ENODEV;
72 } 72 }
73 73
@@ -103,6 +103,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
103 fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; 103 fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
104 fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; 104 fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
105 105
106
106 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; 107 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
107 fcoe_init2.hash_tbl_pbl_addr_hi = (u32) 108 fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
108 ((u64) hba->hash_tbl_pbl_dma >> 32); 109 ((u64) hba->hash_tbl_pbl_dma >> 32);
@@ -165,7 +166,8 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
165 struct bnx2fc_rport *tgt) 166 struct bnx2fc_rport *tgt)
166{ 167{
167 struct fc_lport *lport = port->lport; 168 struct fc_lport *lport = port->lport;
168 struct bnx2fc_hba *hba = port->priv; 169 struct bnx2fc_interface *interface = port->priv;
170 struct bnx2fc_hba *hba = interface->hba;
169 struct kwqe *kwqe_arr[4]; 171 struct kwqe *kwqe_arr[4];
170 struct fcoe_kwqe_conn_offload1 ofld_req1; 172 struct fcoe_kwqe_conn_offload1 ofld_req1;
171 struct fcoe_kwqe_conn_offload2 ofld_req2; 173 struct fcoe_kwqe_conn_offload2 ofld_req2;
@@ -227,7 +229,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
227 ofld_req3.hdr.flags = 229 ofld_req3.hdr.flags =
228 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 230 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
229 231
230 ofld_req3.vlan_tag = hba->vlan_id << 232 ofld_req3.vlan_tag = interface->vlan_id <<
231 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; 233 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
232 ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; 234 ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
233 235
@@ -277,8 +279,20 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
277 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << 279 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
278 FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); 280 FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
279 281
282 /*
283 * Info from PRLI response, this info is used for sequence level error
284 * recovery support
285 */
286 if (tgt->dev_type == TYPE_TAPE) {
287 ofld_req3.flags |= 1 <<
288 FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
289 ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
290 ? 1 : 0) <<
291 FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
292 }
293
280 /* vlan flag */ 294 /* vlan flag */
281 ofld_req3.flags |= (hba->vlan_enabled << 295 ofld_req3.flags |= (interface->vlan_enabled <<
282 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); 296 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
283 297
284 /* C2_VALID and ACK flags are not set as they are not suppported */ 298 /* C2_VALID and ACK flags are not set as they are not suppported */
@@ -300,12 +314,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
300 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; 314 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
301 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; 315 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
302 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; 316 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
303 ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 317 ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
304 ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; 318 /* fcf mac */
305 ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; 319 ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
306 ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; 320 ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
307 ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; 321 ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
308 ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; 322 ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
323 ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
309 324
310 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; 325 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
311 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); 326 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -335,7 +350,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
335 struct bnx2fc_rport *tgt) 350 struct bnx2fc_rport *tgt)
336{ 351{
337 struct kwqe *kwqe_arr[2]; 352 struct kwqe *kwqe_arr[2];
338 struct bnx2fc_hba *hba = port->priv; 353 struct bnx2fc_interface *interface = port->priv;
354 struct bnx2fc_hba *hba = interface->hba;
339 struct fcoe_kwqe_conn_enable_disable enbl_req; 355 struct fcoe_kwqe_conn_enable_disable enbl_req;
340 struct fc_lport *lport = port->lport; 356 struct fc_lport *lport = port->lport;
341 struct fc_rport *rport = tgt->rport; 357 struct fc_rport *rport = tgt->rport;
@@ -358,12 +374,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
358 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; 374 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
359 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); 375 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
360 376
361 enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 377 enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
362 enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; 378 enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
363 enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; 379 enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
364 enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; 380 enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
365 enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; 381 enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
366 enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; 382 enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
367 383
368 port_id = fc_host_port_id(lport->host); 384 port_id = fc_host_port_id(lport->host);
369 if (port_id != tgt->sid) { 385 if (port_id != tgt->sid) {
@@ -379,10 +395,10 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
379 enbl_req.d_id[0] = (port_id & 0x000000FF); 395 enbl_req.d_id[0] = (port_id & 0x000000FF);
380 enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; 396 enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
381 enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; 397 enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
382 enbl_req.vlan_tag = hba->vlan_id << 398 enbl_req.vlan_tag = interface->vlan_id <<
383 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; 399 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
384 enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; 400 enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
385 enbl_req.vlan_flag = hba->vlan_enabled; 401 enbl_req.vlan_flag = interface->vlan_enabled;
386 enbl_req.context_id = tgt->context_id; 402 enbl_req.context_id = tgt->context_id;
387 enbl_req.conn_id = tgt->fcoe_conn_id; 403 enbl_req.conn_id = tgt->fcoe_conn_id;
388 404
@@ -402,7 +418,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
402int bnx2fc_send_session_disable_req(struct fcoe_port *port, 418int bnx2fc_send_session_disable_req(struct fcoe_port *port,
403 struct bnx2fc_rport *tgt) 419 struct bnx2fc_rport *tgt)
404{ 420{
405 struct bnx2fc_hba *hba = port->priv; 421 struct bnx2fc_interface *interface = port->priv;
422 struct bnx2fc_hba *hba = interface->hba;
406 struct fcoe_kwqe_conn_enable_disable disable_req; 423 struct fcoe_kwqe_conn_enable_disable disable_req;
407 struct kwqe *kwqe_arr[2]; 424 struct kwqe *kwqe_arr[2];
408 struct fc_rport *rport = tgt->rport; 425 struct fc_rport *rport = tgt->rport;
@@ -423,12 +440,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
423 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; 440 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
424 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; 441 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
425 442
426 disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 443 disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
427 disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; 444 disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
428 disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; 445 disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
429 disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; 446 disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
430 disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; 447 disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
431 disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; 448 disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
432 449
433 port_id = tgt->sid; 450 port_id = tgt->sid;
434 disable_req.s_id[0] = (port_id & 0x000000FF); 451 disable_req.s_id[0] = (port_id & 0x000000FF);
@@ -442,11 +459,11 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
442 disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; 459 disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
443 disable_req.context_id = tgt->context_id; 460 disable_req.context_id = tgt->context_id;
444 disable_req.conn_id = tgt->fcoe_conn_id; 461 disable_req.conn_id = tgt->fcoe_conn_id;
445 disable_req.vlan_tag = hba->vlan_id << 462 disable_req.vlan_tag = interface->vlan_id <<
446 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; 463 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
447 disable_req.vlan_tag |= 464 disable_req.vlan_tag |=
448 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; 465 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
449 disable_req.vlan_flag = hba->vlan_enabled; 466 disable_req.vlan_flag = interface->vlan_enabled;
450 467
451 kwqe_arr[0] = (struct kwqe *) &disable_req; 468 kwqe_arr[0] = (struct kwqe *) &disable_req;
452 469
@@ -525,7 +542,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
525{ 542{
526 struct fcoe_port *port = tgt->port; 543 struct fcoe_port *port = tgt->port;
527 struct fc_lport *lport = port->lport; 544 struct fc_lport *lport = port->lport;
528 struct bnx2fc_hba *hba = port->priv; 545 struct bnx2fc_interface *interface = port->priv;
529 struct bnx2fc_unsol_els *unsol_els; 546 struct bnx2fc_unsol_els *unsol_els;
530 struct fc_frame_header *fh; 547 struct fc_frame_header *fh;
531 struct fc_frame *fp; 548 struct fc_frame *fp;
@@ -586,7 +603,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
586 fr_eof(fp) = FC_EOF_T; 603 fr_eof(fp) = FC_EOF_T;
587 fr_crc(fp) = cpu_to_le32(~crc); 604 fr_crc(fp) = cpu_to_le32(~crc);
588 unsol_els->lport = lport; 605 unsol_els->lport = lport;
589 unsol_els->hba = hba; 606 unsol_els->hba = interface->hba;
590 unsol_els->fp = fp; 607 unsol_els->fp = fp;
591 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); 608 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
592 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); 609 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
@@ -608,9 +625,12 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
608 u32 frame_len, len; 625 u32 frame_len, len;
609 struct bnx2fc_cmd *io_req = NULL; 626 struct bnx2fc_cmd *io_req = NULL;
610 struct fcoe_task_ctx_entry *task, *task_page; 627 struct fcoe_task_ctx_entry *task, *task_page;
611 struct bnx2fc_hba *hba = tgt->port->priv; 628 struct bnx2fc_interface *interface = tgt->port->priv;
629 struct bnx2fc_hba *hba = interface->hba;
612 int task_idx, index; 630 int task_idx, index;
613 int rc = 0; 631 int rc = 0;
632 u64 err_warn_bit_map;
633 u8 err_warn = 0xff;
614 634
615 635
616 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); 636 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
@@ -673,39 +693,43 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
673 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", 693 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
674 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 694 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
675 695
676 bnx2fc_return_rqe(tgt, 1);
677 696
678 if (xid > BNX2FC_MAX_XID) { 697 if (xid > BNX2FC_MAX_XID) {
679 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", 698 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
680 xid); 699 xid);
681 spin_unlock_bh(&tgt->tgt_lock); 700 goto ret_err_rqe;
682 break;
683 } 701 }
684 702
685 task_idx = xid / BNX2FC_TASKS_PER_PAGE; 703 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
686 index = xid % BNX2FC_TASKS_PER_PAGE; 704 index = xid % BNX2FC_TASKS_PER_PAGE;
687 task_page = (struct fcoe_task_ctx_entry *) 705 task_page = (struct fcoe_task_ctx_entry *)
688 hba->task_ctx[task_idx]; 706 hba->task_ctx[task_idx];
689 task = &(task_page[index]); 707 task = &(task_page[index]);
690 708
691 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; 709 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
692 if (!io_req) { 710 if (!io_req)
693 spin_unlock_bh(&tgt->tgt_lock); 711 goto ret_err_rqe;
694 break;
695 }
696 712
697 if (io_req->cmd_type != BNX2FC_SCSI_CMD) { 713 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
698 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); 714 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
699 spin_unlock_bh(&tgt->tgt_lock); 715 goto ret_err_rqe;
700 break;
701 } 716 }
702 717
703 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, 718 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
704 &io_req->req_flags)) { 719 &io_req->req_flags)) {
705 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " 720 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
706 "progress.. ignore unsol err\n"); 721 "progress.. ignore unsol err\n");
707 spin_unlock_bh(&tgt->tgt_lock); 722 goto ret_err_rqe;
708 break; 723 }
724
725 err_warn_bit_map = (u64)
726 ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
727 (u64)err_entry->data.err_warn_bitmap_lo;
728 for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
729 if (err_warn_bit_map & (u64)((u64)1 << i)) {
730 err_warn = i;
731 break;
732 }
709 } 733 }
710 734
711 /* 735 /*
@@ -715,26 +739,61 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
715 * logging out the target, when the ABTS eventually 739 * logging out the target, when the ABTS eventually
716 * times out. 740 * times out.
717 */ 741 */
718 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, 742 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
719 &io_req->req_flags)) {
720 /*
721 * Cancel the timeout_work, as we received IO
722 * completion with FW error.
723 */
724 if (cancel_delayed_work(&io_req->timeout_work))
725 kref_put(&io_req->refcount,
726 bnx2fc_cmd_release); /* timer hold */
727
728 rc = bnx2fc_initiate_abts(io_req);
729 if (rc != SUCCESS) {
730 BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
731 "failed. issue cleanup\n");
732 rc = bnx2fc_initiate_cleanup(io_req);
733 BUG_ON(rc);
734 }
735 } else
736 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " 743 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
737 "in ABTS processing\n", xid); 744 "in ABTS processing\n", xid);
745 goto ret_err_rqe;
746 }
747 BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
748 if (tgt->dev_type != TYPE_TAPE)
749 goto skip_rec;
750 switch (err_warn) {
751 case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
752 case FCOE_ERROR_CODE_DATA_OOO_RO:
753 case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
754 case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
755 case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
756 case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
757 BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
758 xid);
759 memset(&io_req->err_entry, 0,
760 sizeof(struct fcoe_err_report_entry));
761 memcpy(&io_req->err_entry, err_entry,
762 sizeof(struct fcoe_err_report_entry));
763 if (!test_bit(BNX2FC_FLAG_SRR_SENT,
764 &io_req->req_flags)) {
765 spin_unlock_bh(&tgt->tgt_lock);
766 rc = bnx2fc_send_rec(io_req);
767 spin_lock_bh(&tgt->tgt_lock);
768
769 if (rc)
770 goto skip_rec;
771 } else
772 printk(KERN_ERR PFX "SRR in progress\n");
773 goto ret_err_rqe;
774 break;
775 default:
776 break;
777 }
778
779skip_rec:
780 set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
781 /*
782 * Cancel the timeout_work, as we received IO
783 * completion with FW error.
784 */
785 if (cancel_delayed_work(&io_req->timeout_work))
786 kref_put(&io_req->refcount, bnx2fc_cmd_release);
787
788 rc = bnx2fc_initiate_abts(io_req);
789 if (rc != SUCCESS) {
790 printk(KERN_ERR PFX "err_warn: initiate_abts "
791 "failed xid = 0x%x. issue cleanup\n",
792 io_req->xid);
793 bnx2fc_initiate_cleanup(io_req);
794 }
795ret_err_rqe:
796 bnx2fc_return_rqe(tgt, 1);
738 spin_unlock_bh(&tgt->tgt_lock); 797 spin_unlock_bh(&tgt->tgt_lock);
739 break; 798 break;
740 799
@@ -755,6 +814,47 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
755 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", 814 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
756 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 815 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
757 816
817 if (xid > BNX2FC_MAX_XID) {
818 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
819 goto ret_warn_rqe;
820 }
821
822 err_warn_bit_map = (u64)
823 ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
824 (u64)err_entry->data.err_warn_bitmap_lo;
825 for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
826 if (err_warn_bit_map & (u64) (1 << i)) {
827 err_warn = i;
828 break;
829 }
830 }
831 BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
832
833 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
834 index = xid % BNX2FC_TASKS_PER_PAGE;
835 task_page = (struct fcoe_task_ctx_entry *)
836 interface->hba->task_ctx[task_idx];
837 task = &(task_page[index]);
838 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
839 if (!io_req)
840 goto ret_warn_rqe;
841
842 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
843 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
844 goto ret_warn_rqe;
845 }
846
847 memset(&io_req->err_entry, 0,
848 sizeof(struct fcoe_err_report_entry));
849 memcpy(&io_req->err_entry, err_entry,
850 sizeof(struct fcoe_err_report_entry));
851
852 if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
853 /* REC_TOV is not a warning code */
854 BUG_ON(1);
855 else
856 BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
857ret_warn_rqe:
758 bnx2fc_return_rqe(tgt, 1); 858 bnx2fc_return_rqe(tgt, 1);
759 spin_unlock_bh(&tgt->tgt_lock); 859 spin_unlock_bh(&tgt->tgt_lock);
760 break; 860 break;
@@ -770,7 +870,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
770 struct fcoe_task_ctx_entry *task; 870 struct fcoe_task_ctx_entry *task;
771 struct fcoe_task_ctx_entry *task_page; 871 struct fcoe_task_ctx_entry *task_page;
772 struct fcoe_port *port = tgt->port; 872 struct fcoe_port *port = tgt->port;
773 struct bnx2fc_hba *hba = port->priv; 873 struct bnx2fc_interface *interface = port->priv;
874 struct bnx2fc_hba *hba = interface->hba;
774 struct bnx2fc_cmd *io_req; 875 struct bnx2fc_cmd *io_req;
775 int task_idx, index; 876 int task_idx, index;
776 u16 xid; 877 u16 xid;
@@ -781,7 +882,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
781 spin_lock_bh(&tgt->tgt_lock); 882 spin_lock_bh(&tgt->tgt_lock);
782 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; 883 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
783 if (xid >= BNX2FC_MAX_TASKS) { 884 if (xid >= BNX2FC_MAX_TASKS) {
784 printk(KERN_ALERT PFX "ERROR:xid out of range\n"); 885 printk(KERN_ERR PFX "ERROR:xid out of range\n");
785 spin_unlock_bh(&tgt->tgt_lock); 886 spin_unlock_bh(&tgt->tgt_lock);
786 return; 887 return;
787 } 888 }
@@ -861,6 +962,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
861 kref_put(&io_req->refcount, bnx2fc_cmd_release); 962 kref_put(&io_req->refcount, bnx2fc_cmd_release);
862 break; 963 break;
863 964
965 case BNX2FC_SEQ_CLEANUP:
966 BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
967 io_req->xid);
968 bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
969 kref_put(&io_req->refcount, bnx2fc_cmd_release);
970 break;
971
864 default: 972 default:
865 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); 973 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
866 break; 974 break;
@@ -962,8 +1070,10 @@ unlock:
962 1 - tgt->cq_curr_toggle_bit; 1070 1 - tgt->cq_curr_toggle_bit;
963 } 1071 }
964 } 1072 }
965 bnx2fc_arm_cq(tgt); 1073 if (num_free_sqes) {
966 atomic_add(num_free_sqes, &tgt->free_sqes); 1074 bnx2fc_arm_cq(tgt);
1075 atomic_add(num_free_sqes, &tgt->free_sqes);
1076 }
967 spin_unlock_bh(&tgt->cq_lock); 1077 spin_unlock_bh(&tgt->cq_lock);
968 return 0; 1078 return 0;
969} 1079}
@@ -983,7 +1093,7 @@ static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
983 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; 1093 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
984 1094
985 if (!tgt) { 1095 if (!tgt) {
986 printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id); 1096 printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
987 return; 1097 return;
988 } 1098 }
989 1099
@@ -1004,6 +1114,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1004{ 1114{
1005 struct bnx2fc_rport *tgt; 1115 struct bnx2fc_rport *tgt;
1006 struct fcoe_port *port; 1116 struct fcoe_port *port;
1117 struct bnx2fc_interface *interface;
1007 u32 conn_id; 1118 u32 conn_id;
1008 u32 context_id; 1119 u32 context_id;
1009 int rc; 1120 int rc;
@@ -1018,8 +1129,9 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1018 BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", 1129 BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1019 ofld_kcqe->fcoe_conn_context_id); 1130 ofld_kcqe->fcoe_conn_context_id);
1020 port = tgt->port; 1131 port = tgt->port;
1021 if (hba != tgt->port->priv) { 1132 interface = tgt->port->priv;
1022 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n"); 1133 if (hba != interface->hba) {
1134 printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
1023 goto ofld_cmpl_err; 1135 goto ofld_cmpl_err;
1024 } 1136 }
1025 /* 1137 /*
@@ -1040,7 +1152,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1040 /* now enable the session */ 1152 /* now enable the session */
1041 rc = bnx2fc_send_session_enable_req(port, tgt); 1153 rc = bnx2fc_send_session_enable_req(port, tgt);
1042 if (rc) { 1154 if (rc) {
1043 printk(KERN_ALERT PFX "enable session failed\n"); 1155 printk(KERN_ERR PFX "enable session failed\n");
1044 goto ofld_cmpl_err; 1156 goto ofld_cmpl_err;
1045 } 1157 }
1046 } 1158 }
@@ -1063,6 +1175,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1063 struct fcoe_kcqe *ofld_kcqe) 1175 struct fcoe_kcqe *ofld_kcqe)
1064{ 1176{
1065 struct bnx2fc_rport *tgt; 1177 struct bnx2fc_rport *tgt;
1178 struct bnx2fc_interface *interface;
1066 u32 conn_id; 1179 u32 conn_id;
1067 u32 context_id; 1180 u32 context_id;
1068 1181
@@ -1070,7 +1183,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1070 conn_id = ofld_kcqe->fcoe_conn_id; 1183 conn_id = ofld_kcqe->fcoe_conn_id;
1071 tgt = hba->tgt_ofld_list[conn_id]; 1184 tgt = hba->tgt_ofld_list[conn_id];
1072 if (!tgt) { 1185 if (!tgt) {
1073 printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n"); 1186 printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1074 return; 1187 return;
1075 } 1188 }
1076 1189
@@ -1082,16 +1195,17 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1082 * and enable 1195 * and enable
1083 */ 1196 */
1084 if (tgt->context_id != context_id) { 1197 if (tgt->context_id != context_id) {
1085 printk(KERN_ALERT PFX "context id mis-match\n"); 1198 printk(KERN_ERR PFX "context id mis-match\n");
1086 return; 1199 return;
1087 } 1200 }
1088 if (hba != tgt->port->priv) { 1201 interface = tgt->port->priv;
1089 printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); 1202 if (hba != interface->hba) {
1203 printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1090 goto enbl_cmpl_err; 1204 goto enbl_cmpl_err;
1091 } 1205 }
1092 if (ofld_kcqe->completion_status) { 1206 if (ofld_kcqe->completion_status)
1093 goto enbl_cmpl_err; 1207 goto enbl_cmpl_err;
1094 } else { 1208 else {
1095 /* enable successful - rport ready for issuing IOs */ 1209 /* enable successful - rport ready for issuing IOs */
1096 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); 1210 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1097 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); 1211 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1114,14 +1228,14 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1114 conn_id = disable_kcqe->fcoe_conn_id; 1228 conn_id = disable_kcqe->fcoe_conn_id;
1115 tgt = hba->tgt_ofld_list[conn_id]; 1229 tgt = hba->tgt_ofld_list[conn_id];
1116 if (!tgt) { 1230 if (!tgt) {
1117 printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n"); 1231 printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
1118 return; 1232 return;
1119 } 1233 }
1120 1234
1121 BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); 1235 BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1122 1236
1123 if (disable_kcqe->completion_status) { 1237 if (disable_kcqe->completion_status) {
1124 printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n", 1238 printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
1125 disable_kcqe->completion_status); 1239 disable_kcqe->completion_status);
1126 return; 1240 return;
1127 } else { 1241 } else {
@@ -1143,14 +1257,14 @@ static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1143 conn_id = destroy_kcqe->fcoe_conn_id; 1257 conn_id = destroy_kcqe->fcoe_conn_id;
1144 tgt = hba->tgt_ofld_list[conn_id]; 1258 tgt = hba->tgt_ofld_list[conn_id];
1145 if (!tgt) { 1259 if (!tgt) {
1146 printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n"); 1260 printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
1147 return; 1261 return;
1148 } 1262 }
1149 1263
1150 BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); 1264 BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1151 1265
1152 if (destroy_kcqe->completion_status) { 1266 if (destroy_kcqe->completion_status) {
1153 printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n", 1267 printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
1154 destroy_kcqe->completion_status); 1268 destroy_kcqe->completion_status);
1155 return; 1269 return;
1156 } else { 1270 } else {
@@ -1182,6 +1296,7 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1182 break; 1296 break;
1183 case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: 1297 case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1184 printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); 1298 printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1299 break;
1185 default: 1300 default:
1186 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); 1301 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1187 } 1302 }
@@ -1240,7 +1355,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1240 } else { 1355 } else {
1241 printk(KERN_ERR PFX "DESTROY success\n"); 1356 printk(KERN_ERR PFX "DESTROY success\n");
1242 } 1357 }
1243 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; 1358 set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1244 wake_up_interruptible(&hba->destroy_wait); 1359 wake_up_interruptible(&hba->destroy_wait);
1245 break; 1360 break;
1246 1361
@@ -1262,7 +1377,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1262 case FCOE_KCQE_OPCODE_FCOE_ERROR: 1377 case FCOE_KCQE_OPCODE_FCOE_ERROR:
1263 /* fall thru */ 1378 /* fall thru */
1264 default: 1379 default:
1265 printk(KERN_ALERT PFX "unknown opcode 0x%x\n", 1380 printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1266 kcqe->op_code); 1381 kcqe->op_code);
1267 } 1382 }
1268 } 1383 }
@@ -1305,7 +1420,8 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1305 struct fcoe_port *port = tgt->port; 1420 struct fcoe_port *port = tgt->port;
1306 u32 reg_off; 1421 u32 reg_off;
1307 resource_size_t reg_base; 1422 resource_size_t reg_base;
1308 struct bnx2fc_hba *hba = port->priv; 1423 struct bnx2fc_interface *interface = port->priv;
1424 struct bnx2fc_hba *hba = interface->hba;
1309 1425
1310 reg_base = pci_resource_start(hba->pcidev, 1426 reg_base = pci_resource_start(hba->pcidev,
1311 BNX2X_DOORBELL_PCI_BAR); 1427 BNX2X_DOORBELL_PCI_BAR);
@@ -1344,6 +1460,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1344 tgt->conn_db->rq_prod = tgt->rq_prod_idx; 1460 tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1345} 1461}
1346 1462
1463void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1464 struct fcoe_task_ctx_entry *task,
1465 struct bnx2fc_cmd *orig_io_req,
1466 u32 offset)
1467{
1468 struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1469 struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1470 struct bnx2fc_interface *interface = tgt->port->priv;
1471 struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1472 struct fcoe_task_ctx_entry *orig_task;
1473 struct fcoe_task_ctx_entry *task_page;
1474 struct fcoe_ext_mul_sges_ctx *sgl;
1475 u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1476 u8 orig_task_type;
1477 u16 orig_xid = orig_io_req->xid;
1478 u32 context_id = tgt->context_id;
1479 u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1480 u32 orig_offset = offset;
1481 int bd_count;
1482 int orig_task_idx, index;
1483 int i;
1484
1485 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1486
1487 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1488 orig_task_type = FCOE_TASK_TYPE_WRITE;
1489 else
1490 orig_task_type = FCOE_TASK_TYPE_READ;
1491
1492 /* Tx flags */
1493 task->txwr_rxrd.const_ctx.tx_flags =
1494 FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1495 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1496 /* init flags */
1497 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1498 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1499 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1500 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1501 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1502 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1503 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1504 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1505
1506 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1507
1508 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1509 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1510
1511 bd_count = orig_io_req->bd_tbl->bd_valid;
1512
1513 /* obtain the appropriate bd entry from relative offset */
1514 for (i = 0; i < bd_count; i++) {
1515 if (offset < bd[i].buf_len)
1516 break;
1517 offset -= bd[i].buf_len;
1518 }
1519 phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1520
1521 if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1522 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1523 (u32)phys_addr;
1524 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1525 (u32)((u64)phys_addr >> 32);
1526 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1527 bd_count;
1528 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1529 offset; /* adjusted offset */
1530 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1531 } else {
1532 orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
1533 index = orig_xid % BNX2FC_TASKS_PER_PAGE;
1534
1535 task_page = (struct fcoe_task_ctx_entry *)
1536 interface->hba->task_ctx[orig_task_idx];
1537 orig_task = &(task_page[index]);
1538
1539 /* Multiple SGEs were used for this IO */
1540 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1541 sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1542 sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1543 sgl->mul_sgl.sgl_size = bd_count;
1544 sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */
1545 sgl->mul_sgl.cur_sge_idx = i;
1546
1547 memset(&task->rxwr_only.rx_seq_ctx, 0,
1548 sizeof(struct fcoe_rx_seq_ctx));
1549 task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1550 task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1551 }
1552}
1347void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, 1553void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1348 struct fcoe_task_ctx_entry *task, 1554 struct fcoe_task_ctx_entry *task,
1349 u16 orig_xid) 1555 u16 orig_xid)
@@ -1360,7 +1566,12 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1360 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1566 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1361 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1567 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1362 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; 1568 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1363 task->txwr_rxrd.const_ctx.init_flags |= 1569 if (tgt->dev_type == TYPE_TAPE)
1570 task->txwr_rxrd.const_ctx.init_flags |=
1571 FCOE_TASK_DEV_TYPE_TAPE <<
1572 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1573 else
1574 task->txwr_rxrd.const_ctx.init_flags |=
1364 FCOE_TASK_DEV_TYPE_DISK << 1575 FCOE_TASK_DEV_TYPE_DISK <<
1365 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1576 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1366 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; 1577 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
@@ -1420,7 +1631,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1420 /* init flags */ 1631 /* init flags */
1421 task->txwr_rxrd.const_ctx.init_flags = task_type << 1632 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1422 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1633 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1423 task->txwr_rxrd.const_ctx.init_flags |= 1634 if (tgt->dev_type == TYPE_TAPE)
1635 task->txwr_rxrd.const_ctx.init_flags |=
1636 FCOE_TASK_DEV_TYPE_TAPE <<
1637 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1638 else
1639 task->txwr_rxrd.const_ctx.init_flags |=
1424 FCOE_TASK_DEV_TYPE_DISK << 1640 FCOE_TASK_DEV_TYPE_DISK <<
1425 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1641 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1426 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1642 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
@@ -1477,6 +1693,7 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1477 struct bnx2fc_rport *tgt = io_req->tgt; 1693 struct bnx2fc_rport *tgt = io_req->tgt;
1478 struct fcoe_cached_sge_ctx *cached_sge; 1694 struct fcoe_cached_sge_ctx *cached_sge;
1479 struct fcoe_ext_mul_sges_ctx *sgl; 1695 struct fcoe_ext_mul_sges_ctx *sgl;
1696 int dev_type = tgt->dev_type;
1480 u64 *fcp_cmnd; 1697 u64 *fcp_cmnd;
1481 u64 tmp_fcp_cmnd[4]; 1698 u64 tmp_fcp_cmnd[4];
1482 u32 context_id; 1699 u32 context_id;
@@ -1494,20 +1711,40 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1494 task_type = FCOE_TASK_TYPE_READ; 1711 task_type = FCOE_TASK_TYPE_READ;
1495 1712
1496 /* Tx only */ 1713 /* Tx only */
1714 bd_count = bd_tbl->bd_valid;
1497 if (task_type == FCOE_TASK_TYPE_WRITE) { 1715 if (task_type == FCOE_TASK_TYPE_WRITE) {
1498 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = 1716 if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
1499 (u32)bd_tbl->bd_tbl_dma; 1717 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1500 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = 1718
1501 (u32)((u64)bd_tbl->bd_tbl_dma >> 32); 1719 task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
1502 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1720 fcoe_bd_tbl->buf_addr_lo;
1503 bd_tbl->bd_valid; 1721 task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
1722 fcoe_bd_tbl->buf_addr_hi;
1723 task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
1724 fcoe_bd_tbl->buf_len;
1725
1726 task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1727 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1728 } else {
1729 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1730 (u32)bd_tbl->bd_tbl_dma;
1731 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1732 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1733 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1734 bd_tbl->bd_valid;
1735 }
1504 } 1736 }
1505 1737
1506 /*Tx Write Rx Read */ 1738 /*Tx Write Rx Read */
1507 /* Init state to NORMAL */ 1739 /* Init state to NORMAL */
1508 task->txwr_rxrd.const_ctx.init_flags = task_type << 1740 task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1509 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1741 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1510 task->txwr_rxrd.const_ctx.init_flags |= 1742 if (dev_type == TYPE_TAPE)
1743 task->txwr_rxrd.const_ctx.init_flags |=
1744 FCOE_TASK_DEV_TYPE_TAPE <<
1745 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1746 else
1747 task->txwr_rxrd.const_ctx.init_flags |=
1511 FCOE_TASK_DEV_TYPE_DISK << 1748 FCOE_TASK_DEV_TYPE_DISK <<
1512 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1749 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1513 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1750 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
@@ -1550,7 +1787,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1550 cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; 1787 cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1551 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; 1788 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1552 bd_count = bd_tbl->bd_valid; 1789 bd_count = bd_tbl->bd_valid;
1553 if (task_type == FCOE_TASK_TYPE_READ) { 1790 if (task_type == FCOE_TASK_TYPE_READ &&
1791 dev_type == TYPE_DISK) {
1554 if (bd_count == 1) { 1792 if (bd_count == 1) {
1555 1793
1556 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; 1794 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
@@ -1582,6 +1820,11 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1582 (u32)((u64)bd_tbl->bd_tbl_dma >> 32); 1820 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1583 sgl->mul_sgl.sgl_size = bd_count; 1821 sgl->mul_sgl.sgl_size = bd_count;
1584 } 1822 }
1823 } else {
1824 sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1825 sgl->mul_sgl.cur_sge_addr.hi =
1826 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1827 sgl->mul_sgl.sgl_size = bd_count;
1585 } 1828 }
1586} 1829}
1587 1830
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 45eba6d609c9..6cc3789075bc 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,7 @@
1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. 1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * IO manager and SCSI IO processing. 2 * IO manager and SCSI IO processing.
3 * 3 *
4 * Copyright (c) 2008 - 2010 Broadcom Corporation 4 * Copyright (c) 2008 - 2011 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -18,8 +18,6 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
18 int bd_index); 18 int bd_index);
19static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); 19static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
20static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); 20static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
21static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
22 struct bnx2fc_cmd *io_req);
23static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); 21static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
24static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); 22static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
25static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 23static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
@@ -29,10 +27,11 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
29void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, 27void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
30 unsigned int timer_msec) 28 unsigned int timer_msec)
31{ 29{
32 struct bnx2fc_hba *hba = io_req->port->priv; 30 struct bnx2fc_interface *interface = io_req->port->priv;
33 31
34 if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work, 32 if (queue_delayed_work(interface->timer_work_queue,
35 msecs_to_jiffies(timer_msec))) 33 &io_req->timeout_work,
34 msecs_to_jiffies(timer_msec)))
36 kref_get(&io_req->refcount); 35 kref_get(&io_req->refcount);
37} 36}
38 37
@@ -217,6 +216,11 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
217 return; 216 return;
218 217
219 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); 218 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
219 if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
220 /* Do not call scsi done for this IO */
221 return;
222 }
223
220 bnx2fc_unmap_sg_list(io_req); 224 bnx2fc_unmap_sg_list(io_req);
221 io_req->sc_cmd = NULL; 225 io_req->sc_cmd = NULL;
222 if (!sc_cmd) { 226 if (!sc_cmd) {
@@ -419,8 +423,8 @@ free_cmgr:
419struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) 423struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
420{ 424{
421 struct fcoe_port *port = tgt->port; 425 struct fcoe_port *port = tgt->port;
422 struct bnx2fc_hba *hba = port->priv; 426 struct bnx2fc_interface *interface = port->priv;
423 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; 427 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
424 struct bnx2fc_cmd *io_req; 428 struct bnx2fc_cmd *io_req;
425 struct list_head *listp; 429 struct list_head *listp;
426 struct io_bdt *bd_tbl; 430 struct io_bdt *bd_tbl;
@@ -485,11 +489,12 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
485 kref_init(&io_req->refcount); 489 kref_init(&io_req->refcount);
486 return io_req; 490 return io_req;
487} 491}
488static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) 492
493struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
489{ 494{
490 struct fcoe_port *port = tgt->port; 495 struct fcoe_port *port = tgt->port;
491 struct bnx2fc_hba *hba = port->priv; 496 struct bnx2fc_interface *interface = port->priv;
492 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; 497 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
493 struct bnx2fc_cmd *io_req; 498 struct bnx2fc_cmd *io_req;
494 struct list_head *listp; 499 struct list_head *listp;
495 struct io_bdt *bd_tbl; 500 struct io_bdt *bd_tbl;
@@ -570,7 +575,8 @@ void bnx2fc_cmd_release(struct kref *ref)
570static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) 575static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
571{ 576{
572 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); 577 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
573 struct bnx2fc_hba *hba = io_req->port->priv; 578 struct bnx2fc_interface *interface = io_req->port->priv;
579 struct bnx2fc_hba *hba = interface->hba;
574 size_t sz = sizeof(struct fcoe_bd_ctx); 580 size_t sz = sizeof(struct fcoe_bd_ctx);
575 581
576 /* clear tm flags */ 582 /* clear tm flags */
@@ -606,7 +612,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
606 struct bnx2fc_mp_req *mp_req; 612 struct bnx2fc_mp_req *mp_req;
607 struct fcoe_bd_ctx *mp_req_bd; 613 struct fcoe_bd_ctx *mp_req_bd;
608 struct fcoe_bd_ctx *mp_resp_bd; 614 struct fcoe_bd_ctx *mp_resp_bd;
609 struct bnx2fc_hba *hba = io_req->port->priv; 615 struct bnx2fc_interface *interface = io_req->port->priv;
616 struct bnx2fc_hba *hba = interface->hba;
610 dma_addr_t addr; 617 dma_addr_t addr;
611 size_t sz; 618 size_t sz;
612 619
@@ -682,7 +689,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
682 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 689 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
683 struct fc_rport_libfc_priv *rp = rport->dd_data; 690 struct fc_rport_libfc_priv *rp = rport->dd_data;
684 struct fcoe_port *port; 691 struct fcoe_port *port;
685 struct bnx2fc_hba *hba; 692 struct bnx2fc_interface *interface;
686 struct bnx2fc_rport *tgt; 693 struct bnx2fc_rport *tgt;
687 struct bnx2fc_cmd *io_req; 694 struct bnx2fc_cmd *io_req;
688 struct bnx2fc_mp_req *tm_req; 695 struct bnx2fc_mp_req *tm_req;
@@ -699,10 +706,10 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
699 706
700 lport = shost_priv(host); 707 lport = shost_priv(host);
701 port = lport_priv(lport); 708 port = lport_priv(lport);
702 hba = port->priv; 709 interface = port->priv;
703 710
704 if (rport == NULL) { 711 if (rport == NULL) {
705 printk(KERN_ALERT PFX "device_reset: rport is NULL\n"); 712 printk(KERN_ERR PFX "device_reset: rport is NULL\n");
706 rc = FAILED; 713 rc = FAILED;
707 goto tmf_err; 714 goto tmf_err;
708 } 715 }
@@ -745,7 +752,9 @@ retry_tmf:
745 rc = bnx2fc_init_mp_req(io_req); 752 rc = bnx2fc_init_mp_req(io_req);
746 if (rc == FAILED) { 753 if (rc == FAILED) {
747 printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); 754 printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
755 spin_lock_bh(&tgt->tgt_lock);
748 kref_put(&io_req->refcount, bnx2fc_cmd_release); 756 kref_put(&io_req->refcount, bnx2fc_cmd_release);
757 spin_unlock_bh(&tgt->tgt_lock);
749 goto tmf_err; 758 goto tmf_err;
750 } 759 }
751 760
@@ -774,7 +783,8 @@ retry_tmf:
774 index = xid % BNX2FC_TASKS_PER_PAGE; 783 index = xid % BNX2FC_TASKS_PER_PAGE;
775 784
776 /* Initialize task context for this IO request */ 785 /* Initialize task context for this IO request */
777 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 786 task_page = (struct fcoe_task_ctx_entry *)
787 interface->hba->task_ctx[task_idx];
778 task = &(task_page[index]); 788 task = &(task_page[index]);
779 bnx2fc_init_mp_task(io_req, task); 789 bnx2fc_init_mp_task(io_req, task);
780 790
@@ -806,10 +816,10 @@ retry_tmf:
806 spin_unlock_bh(&tgt->tgt_lock); 816 spin_unlock_bh(&tgt->tgt_lock);
807 817
808 if (!rc) { 818 if (!rc) {
809 printk(KERN_ERR PFX "task mgmt command failed...\n"); 819 BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
810 rc = FAILED; 820 rc = FAILED;
811 } else { 821 } else {
812 printk(KERN_ERR PFX "task mgmt command success...\n"); 822 BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
813 rc = SUCCESS; 823 rc = SUCCESS;
814 } 824 }
815tmf_err: 825tmf_err:
@@ -822,7 +832,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
822 struct bnx2fc_rport *tgt = io_req->tgt; 832 struct bnx2fc_rport *tgt = io_req->tgt;
823 struct fc_rport *rport = tgt->rport; 833 struct fc_rport *rport = tgt->rport;
824 struct fc_rport_priv *rdata = tgt->rdata; 834 struct fc_rport_priv *rdata = tgt->rdata;
825 struct bnx2fc_hba *hba; 835 struct bnx2fc_interface *interface;
826 struct fcoe_port *port; 836 struct fcoe_port *port;
827 struct bnx2fc_cmd *abts_io_req; 837 struct bnx2fc_cmd *abts_io_req;
828 struct fcoe_task_ctx_entry *task; 838 struct fcoe_task_ctx_entry *task;
@@ -839,7 +849,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
839 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); 849 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
840 850
841 port = io_req->port; 851 port = io_req->port;
842 hba = port->priv; 852 interface = port->priv;
843 lport = port->lport; 853 lport = port->lport;
844 854
845 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 855 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
@@ -849,7 +859,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
849 } 859 }
850 860
851 if (rport == NULL) { 861 if (rport == NULL) {
852 printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n"); 862 printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
853 rc = FAILED; 863 rc = FAILED;
854 goto abts_err; 864 goto abts_err;
855 } 865 }
@@ -896,7 +906,8 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
896 index = xid % BNX2FC_TASKS_PER_PAGE; 906 index = xid % BNX2FC_TASKS_PER_PAGE;
897 907
898 /* Initialize task context for this IO request */ 908 /* Initialize task context for this IO request */
899 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 909 task_page = (struct fcoe_task_ctx_entry *)
910 interface->hba->task_ctx[task_idx];
900 task = &(task_page[index]); 911 task = &(task_page[index]);
901 bnx2fc_init_mp_task(abts_io_req, task); 912 bnx2fc_init_mp_task(abts_io_req, task);
902 913
@@ -924,11 +935,81 @@ abts_err:
924 return rc; 935 return rc;
925} 936}
926 937
938int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
939 enum fc_rctl r_ctl)
940{
941 struct fc_lport *lport;
942 struct bnx2fc_rport *tgt = orig_io_req->tgt;
943 struct bnx2fc_interface *interface;
944 struct fcoe_port *port;
945 struct bnx2fc_cmd *seq_clnp_req;
946 struct fcoe_task_ctx_entry *task;
947 struct fcoe_task_ctx_entry *task_page;
948 struct bnx2fc_els_cb_arg *cb_arg = NULL;
949 int task_idx, index;
950 u16 xid;
951 int rc = 0;
952
953 BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
954 orig_io_req->xid);
955 kref_get(&orig_io_req->refcount);
956
957 port = orig_io_req->port;
958 interface = port->priv;
959 lport = port->lport;
960
961 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
962 if (!cb_arg) {
963 printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
964 rc = -ENOMEM;
965 goto cleanup_err;
966 }
967
968 seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
969 if (!seq_clnp_req) {
970 printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
971 rc = -ENOMEM;
972 kfree(cb_arg);
973 goto cleanup_err;
974 }
975 /* Initialize rest of io_req fields */
976 seq_clnp_req->sc_cmd = NULL;
977 seq_clnp_req->port = port;
978 seq_clnp_req->tgt = tgt;
979 seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
980
981 xid = seq_clnp_req->xid;
982
983 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
984 index = xid % BNX2FC_TASKS_PER_PAGE;
985
986 /* Initialize task context for this IO request */
987 task_page = (struct fcoe_task_ctx_entry *)
988 interface->hba->task_ctx[task_idx];
989 task = &(task_page[index]);
990 cb_arg->aborted_io_req = orig_io_req;
991 cb_arg->io_req = seq_clnp_req;
992 cb_arg->r_ctl = r_ctl;
993 cb_arg->offset = offset;
994 seq_clnp_req->cb_arg = cb_arg;
995
996 printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
997 bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
998
999 /* Obtain free SQ entry */
1000 bnx2fc_add_2_sq(tgt, xid);
1001
1002 /* Ring doorbell */
1003 bnx2fc_ring_doorbell(tgt);
1004cleanup_err:
1005 return rc;
1006}
1007
927int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) 1008int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
928{ 1009{
929 struct fc_lport *lport; 1010 struct fc_lport *lport;
930 struct bnx2fc_rport *tgt = io_req->tgt; 1011 struct bnx2fc_rport *tgt = io_req->tgt;
931 struct bnx2fc_hba *hba; 1012 struct bnx2fc_interface *interface;
932 struct fcoe_port *port; 1013 struct fcoe_port *port;
933 struct bnx2fc_cmd *cleanup_io_req; 1014 struct bnx2fc_cmd *cleanup_io_req;
934 struct fcoe_task_ctx_entry *task; 1015 struct fcoe_task_ctx_entry *task;
@@ -941,7 +1022,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
941 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); 1022 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
942 1023
943 port = io_req->port; 1024 port = io_req->port;
944 hba = port->priv; 1025 interface = port->priv;
945 lport = port->lport; 1026 lport = port->lport;
946 1027
947 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); 1028 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
@@ -963,7 +1044,8 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
963 index = xid % BNX2FC_TASKS_PER_PAGE; 1044 index = xid % BNX2FC_TASKS_PER_PAGE;
964 1045
965 /* Initialize task context for this IO request */ 1046 /* Initialize task context for this IO request */
966 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 1047 task_page = (struct fcoe_task_ctx_entry *)
1048 interface->hba->task_ctx[task_idx];
967 task = &(task_page[index]); 1049 task = &(task_page[index]);
968 orig_xid = io_req->xid; 1050 orig_xid = io_req->xid;
969 1051
@@ -1031,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1031 1113
1032 lport = shost_priv(sc_cmd->device->host); 1114 lport = shost_priv(sc_cmd->device->host);
1033 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { 1115 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1034 printk(KERN_ALERT PFX "eh_abort: link not ready\n"); 1116 printk(KERN_ERR PFX "eh_abort: link not ready\n");
1035 return rc; 1117 return rc;
1036 } 1118 }
1037 1119
@@ -1062,7 +1144,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1062 * io_req is no longer in the active_q. 1144 * io_req is no longer in the active_q.
1063 */ 1145 */
1064 if (tgt->flush_in_prog) { 1146 if (tgt->flush_in_prog) {
1065 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1147 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1066 "flush in progress\n", io_req->xid); 1148 "flush in progress\n", io_req->xid);
1067 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1149 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1068 spin_unlock_bh(&tgt->tgt_lock); 1150 spin_unlock_bh(&tgt->tgt_lock);
@@ -1070,7 +1152,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1070 } 1152 }
1071 1153
1072 if (io_req->on_active_queue == 0) { 1154 if (io_req->on_active_queue == 0) {
1073 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1155 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1074 "not on active_q\n", io_req->xid); 1156 "not on active_q\n", io_req->xid);
1075 /* 1157 /*
1076 * This condition can happen only due to the FW bug, 1158 * This condition can happen only due to the FW bug,
@@ -1108,7 +1190,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1108 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); 1190 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1109 rc = bnx2fc_initiate_abts(io_req); 1191 rc = bnx2fc_initiate_abts(io_req);
1110 } else { 1192 } else {
1111 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1193 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1112 "already in abts processing\n", io_req->xid); 1194 "already in abts processing\n", io_req->xid);
1113 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1195 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1114 spin_unlock_bh(&tgt->tgt_lock); 1196 spin_unlock_bh(&tgt->tgt_lock);
@@ -1149,6 +1231,42 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1149 return rc; 1231 return rc;
1150} 1232}
1151 1233
1234void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
1235 struct fcoe_task_ctx_entry *task,
1236 u8 rx_state)
1237{
1238 struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
1239 struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
1240 u32 offset = cb_arg->offset;
1241 enum fc_rctl r_ctl = cb_arg->r_ctl;
1242 int rc = 0;
1243 struct bnx2fc_rport *tgt = orig_io_req->tgt;
1244
1245 BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
1246 "cmd_type = %d\n",
1247 seq_clnp_req->xid, seq_clnp_req->cmd_type);
1248
1249 if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
1250 printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
1251 seq_clnp_req->xid);
1252 goto free_cb_arg;
1253 }
1254 kref_get(&orig_io_req->refcount);
1255
1256 spin_unlock_bh(&tgt->tgt_lock);
1257 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
1258 spin_lock_bh(&tgt->tgt_lock);
1259
1260 if (rc)
1261 printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
1262 " IO will abort\n");
1263 seq_clnp_req->cb_arg = NULL;
1264 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
1265free_cb_arg:
1266 kfree(cb_arg);
1267 return;
1268}
1269
1152void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, 1270void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
1153 struct fcoe_task_ctx_entry *task, 1271 struct fcoe_task_ctx_entry *task,
1154 u8 num_rq) 1272 u8 num_rq)
@@ -1378,7 +1496,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1378 fc_hdr->fh_r_ctl); 1496 fc_hdr->fh_r_ctl);
1379 } 1497 }
1380 if (!sc_cmd->SCp.ptr) { 1498 if (!sc_cmd->SCp.ptr) {
1381 printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n"); 1499 printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
1382 return; 1500 return;
1383 } 1501 }
1384 switch (io_req->fcp_status) { 1502 switch (io_req->fcp_status) {
@@ -1410,7 +1528,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1410 io_req->on_tmf_queue = 0; 1528 io_req->on_tmf_queue = 0;
1411 } else { 1529 } else {
1412 1530
1413 printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n"); 1531 printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
1414 return; 1532 return;
1415 } 1533 }
1416 1534
@@ -1597,7 +1715,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
1597 1715
1598 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { 1716 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
1599 /* Invalid sense sense length. */ 1717 /* Invalid sense sense length. */
1600 printk(KERN_ALERT PFX "invalid sns length %d\n", 1718 printk(KERN_ERR PFX "invalid sns length %d\n",
1601 rq_buff_len); 1719 rq_buff_len);
1602 /* reset rq_buff_len */ 1720 /* reset rq_buff_len */
1603 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; 1721 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
@@ -1780,7 +1898,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1780 scsi_set_resid(sc_cmd, io_req->fcp_resid); 1898 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1781 break; 1899 break;
1782 default: 1900 default:
1783 printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n", 1901 printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
1784 io_req->fcp_status); 1902 io_req->fcp_status);
1785 break; 1903 break;
1786 } 1904 }
@@ -1789,14 +1907,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1789 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1907 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1790} 1908}
1791 1909
1792static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, 1910int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1793 struct bnx2fc_cmd *io_req) 1911 struct bnx2fc_cmd *io_req)
1794{ 1912{
1795 struct fcoe_task_ctx_entry *task; 1913 struct fcoe_task_ctx_entry *task;
1796 struct fcoe_task_ctx_entry *task_page; 1914 struct fcoe_task_ctx_entry *task_page;
1797 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1915 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1798 struct fcoe_port *port = tgt->port; 1916 struct fcoe_port *port = tgt->port;
1799 struct bnx2fc_hba *hba = port->priv; 1917 struct bnx2fc_interface *interface = port->priv;
1918 struct bnx2fc_hba *hba = interface->hba;
1800 struct fc_lport *lport = port->lport; 1919 struct fc_lport *lport = port->lport;
1801 struct fcoe_dev_stats *stats; 1920 struct fcoe_dev_stats *stats;
1802 int task_idx, index; 1921 int task_idx, index;
@@ -1854,7 +1973,8 @@ static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1854 } 1973 }
1855 1974
1856 /* Time IO req */ 1975 /* Time IO req */
1857 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); 1976 if (tgt->io_timeout)
1977 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
1858 /* Obtain free SQ entry */ 1978 /* Obtain free SQ entry */
1859 bnx2fc_add_2_sq(tgt, xid); 1979 bnx2fc_add_2_sq(tgt, xid);
1860 1980
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 3e892bd66fbe..d5311b577cca 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -2,7 +2,7 @@
2 * Handles operations such as session offload/upload etc, and manages 2 * Handles operations such as session offload/upload etc, and manages
3 * session resources such as connection id and qp resources. 3 * session resources such as connection id and qp resources.
4 * 4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation 5 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -65,7 +65,8 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
65{ 65{
66 struct fc_lport *lport = rdata->local_port; 66 struct fc_lport *lport = rdata->local_port;
67 struct fc_rport *rport = rdata->rport; 67 struct fc_rport *rport = rdata->rport;
68 struct bnx2fc_hba *hba = port->priv; 68 struct bnx2fc_interface *interface = port->priv;
69 struct bnx2fc_hba *hba = interface->hba;
69 int rval; 70 int rval;
70 int i = 0; 71 int i = 0;
71 72
@@ -237,7 +238,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
237static void bnx2fc_upload_session(struct fcoe_port *port, 238static void bnx2fc_upload_session(struct fcoe_port *port,
238 struct bnx2fc_rport *tgt) 239 struct bnx2fc_rport *tgt)
239{ 240{
240 struct bnx2fc_hba *hba = port->priv; 241 struct bnx2fc_interface *interface = port->priv;
242 struct bnx2fc_hba *hba = interface->hba;
241 243
242 BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n", 244 BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
243 tgt->num_active_ios.counter); 245 tgt->num_active_ios.counter);
@@ -316,7 +318,8 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
316{ 318{
317 319
318 struct fc_rport *rport = rdata->rport; 320 struct fc_rport *rport = rdata->rport;
319 struct bnx2fc_hba *hba = port->priv; 321 struct bnx2fc_interface *interface = port->priv;
322 struct bnx2fc_hba *hba = interface->hba;
320 struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; 323 struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
321 struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; 324 struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
322 325
@@ -350,6 +353,14 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
350 tgt->rq_cons_idx = 0; 353 tgt->rq_cons_idx = 0;
351 atomic_set(&tgt->num_active_ios, 0); 354 atomic_set(&tgt->num_active_ios, 0);
352 355
356 if (rdata->flags & FC_RP_FLAGS_RETRY) {
357 tgt->dev_type = TYPE_TAPE;
358 tgt->io_timeout = 0; /* use default ULP timeout */
359 } else {
360 tgt->dev_type = TYPE_DISK;
361 tgt->io_timeout = BNX2FC_IO_TIMEOUT;
362 }
363
353 /* initialize sq doorbell */ 364 /* initialize sq doorbell */
354 sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE; 365 sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
355 sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE << 366 sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
@@ -392,7 +403,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
392 enum fc_rport_event event) 403 enum fc_rport_event event)
393{ 404{
394 struct fcoe_port *port = lport_priv(lport); 405 struct fcoe_port *port = lport_priv(lport);
395 struct bnx2fc_hba *hba = port->priv; 406 struct bnx2fc_interface *interface = port->priv;
407 struct bnx2fc_hba *hba = interface->hba;
396 struct fc_rport *rport = rdata->rport; 408 struct fc_rport *rport = rdata->rport;
397 struct fc_rport_libfc_priv *rp; 409 struct fc_rport_libfc_priv *rp;
398 struct bnx2fc_rport *tgt; 410 struct bnx2fc_rport *tgt;
@@ -403,7 +415,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
403 switch (event) { 415 switch (event) {
404 case RPORT_EV_READY: 416 case RPORT_EV_READY:
405 if (!rport) { 417 if (!rport) {
406 printk(KERN_ALERT PFX "rport is NULL: ERROR!\n"); 418 printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
407 break; 419 break;
408 } 420 }
409 421
@@ -415,7 +427,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
415 * We should not come here, as lport will 427 * We should not come here, as lport will
416 * take care of fabric login 428 * take care of fabric login
417 */ 429 */
418 printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n", 430 printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
419 rdata->ids.port_id); 431 rdata->ids.port_id);
420 break; 432 break;
421 } 433 }
@@ -483,7 +495,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
483 break; 495 break;
484 496
485 if (!rport) { 497 if (!rport) {
486 printk(KERN_ALERT PFX "%x - rport not created Yet!!\n", 498 printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
487 port_id); 499 port_id);
488 break; 500 break;
489 } 501 }
@@ -537,7 +549,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
537struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, 549struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
538 u32 port_id) 550 u32 port_id)
539{ 551{
540 struct bnx2fc_hba *hba = port->priv; 552 struct bnx2fc_interface *interface = port->priv;
553 struct bnx2fc_hba *hba = interface->hba;
541 struct bnx2fc_rport *tgt; 554 struct bnx2fc_rport *tgt;
542 struct fc_rport_priv *rdata; 555 struct fc_rport_priv *rdata;
543 int i; 556 int i;
@@ -552,7 +565,7 @@ struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
552 "obtained\n"); 565 "obtained\n");
553 return tgt; 566 return tgt;
554 } else { 567 } else {
555 printk(KERN_ERR PFX "rport 0x%x " 568 BNX2FC_TGT_DBG(tgt, "rport 0x%x "
556 "is in DELETED state\n", 569 "is in DELETED state\n",
557 rdata->ids.port_id); 570 rdata->ids.port_id);
558 return NULL; 571 return NULL;
@@ -633,7 +646,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
633 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 646 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
634 &tgt->sq_dma, GFP_KERNEL); 647 &tgt->sq_dma, GFP_KERNEL);
635 if (!tgt->sq) { 648 if (!tgt->sq) {
636 printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n", 649 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
637 tgt->sq_mem_size); 650 tgt->sq_mem_size);
638 goto mem_alloc_failure; 651 goto mem_alloc_failure;
639 } 652 }
@@ -646,7 +659,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
646 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 659 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
647 &tgt->cq_dma, GFP_KERNEL); 660 &tgt->cq_dma, GFP_KERNEL);
648 if (!tgt->cq) { 661 if (!tgt->cq) {
649 printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n", 662 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
650 tgt->cq_mem_size); 663 tgt->cq_mem_size);
651 goto mem_alloc_failure; 664 goto mem_alloc_failure;
652 } 665 }
@@ -659,7 +672,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
659 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 672 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
660 &tgt->rq_dma, GFP_KERNEL); 673 &tgt->rq_dma, GFP_KERNEL);
661 if (!tgt->rq) { 674 if (!tgt->rq) {
662 printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n", 675 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
663 tgt->rq_mem_size); 676 tgt->rq_mem_size);
664 goto mem_alloc_failure; 677 goto mem_alloc_failure;
665 } 678 }
@@ -671,7 +684,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
671 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 684 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
672 &tgt->rq_pbl_dma, GFP_KERNEL); 685 &tgt->rq_pbl_dma, GFP_KERNEL);
673 if (!tgt->rq_pbl) { 686 if (!tgt->rq_pbl) {
674 printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n", 687 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
675 tgt->rq_pbl_size); 688 tgt->rq_pbl_size);
676 goto mem_alloc_failure; 689 goto mem_alloc_failure;
677 } 690 }
@@ -697,7 +710,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
697 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, 710 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
698 &tgt->xferq_dma, GFP_KERNEL); 711 &tgt->xferq_dma, GFP_KERNEL);
699 if (!tgt->xferq) { 712 if (!tgt->xferq) {
700 printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n", 713 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
701 tgt->xferq_mem_size); 714 tgt->xferq_mem_size);
702 goto mem_alloc_failure; 715 goto mem_alloc_failure;
703 } 716 }
@@ -711,7 +724,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
711 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, 724 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
712 &tgt->confq_dma, GFP_KERNEL); 725 &tgt->confq_dma, GFP_KERNEL);
713 if (!tgt->confq) { 726 if (!tgt->confq) {
714 printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n", 727 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
715 tgt->confq_mem_size); 728 tgt->confq_mem_size);
716 goto mem_alloc_failure; 729 goto mem_alloc_failure;
717 } 730 }
@@ -726,7 +739,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
726 tgt->confq_pbl_size, 739 tgt->confq_pbl_size,
727 &tgt->confq_pbl_dma, GFP_KERNEL); 740 &tgt->confq_pbl_dma, GFP_KERNEL);
728 if (!tgt->confq_pbl) { 741 if (!tgt->confq_pbl) {
729 printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n", 742 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
730 tgt->confq_pbl_size); 743 tgt->confq_pbl_size);
731 goto mem_alloc_failure; 744 goto mem_alloc_failure;
732 } 745 }
@@ -751,7 +764,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
751 tgt->conn_db_mem_size, 764 tgt->conn_db_mem_size,
752 &tgt->conn_db_dma, GFP_KERNEL); 765 &tgt->conn_db_dma, GFP_KERNEL);
753 if (!tgt->conn_db) { 766 if (!tgt->conn_db) {
754 printk(KERN_ALERT PFX "unable to allocate conn_db %d\n", 767 printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
755 tgt->conn_db_mem_size); 768 tgt->conn_db_mem_size);
756 goto mem_alloc_failure; 769 goto mem_alloc_failure;
757 } 770 }
@@ -767,7 +780,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
767 &tgt->lcq_dma, GFP_KERNEL); 780 &tgt->lcq_dma, GFP_KERNEL);
768 781
769 if (!tgt->lcq) { 782 if (!tgt->lcq) {
770 printk(KERN_ALERT PFX "unable to allocate lcq %d\n", 783 printk(KERN_ERR PFX "unable to allocate lcq %d\n",
771 tgt->lcq_mem_size); 784 tgt->lcq_mem_size);
772 goto mem_alloc_failure; 785 goto mem_alloc_failure;
773 } 786 }
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 2e7c136bb805..27c9d65d54a9 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -128,25 +128,7 @@ struct c4_inquiry {
128 u8 reserved[2]; 128 u8 reserved[2];
129}; 129};
130 130
131struct rdac_controller { 131#define UNIQUE_ID_LEN 16
132 u8 subsys_id[SUBSYS_ID_LEN];
133 u8 slot_id[SLOT_ID_LEN];
134 int use_ms10;
135 struct kref kref;
136 struct list_head node; /* list of all controllers */
137 union {
138 struct rdac_pg_legacy legacy;
139 struct rdac_pg_expanded expanded;
140 } mode_select;
141 u8 index;
142 u8 array_name[ARRAY_LABEL_LEN];
143 spinlock_t ms_lock;
144 int ms_queued;
145 struct work_struct ms_work;
146 struct scsi_device *ms_sdev;
147 struct list_head ms_head;
148};
149
150struct c8_inquiry { 132struct c8_inquiry {
151 u8 peripheral_info; 133 u8 peripheral_info;
152 u8 page_code; /* 0xC8 */ 134 u8 page_code; /* 0xC8 */
@@ -159,12 +141,31 @@ struct c8_inquiry {
159 u8 vol_user_label_len; 141 u8 vol_user_label_len;
160 u8 vol_user_label[60]; 142 u8 vol_user_label[60];
161 u8 array_uniq_id_len; 143 u8 array_uniq_id_len;
162 u8 array_unique_id[16]; 144 u8 array_unique_id[UNIQUE_ID_LEN];
163 u8 array_user_label_len; 145 u8 array_user_label_len;
164 u8 array_user_label[60]; 146 u8 array_user_label[60];
165 u8 lun[8]; 147 u8 lun[8];
166}; 148};
167 149
150struct rdac_controller {
151 u8 array_id[UNIQUE_ID_LEN];
152 int use_ms10;
153 struct kref kref;
154 struct list_head node; /* list of all controllers */
155 union {
156 struct rdac_pg_legacy legacy;
157 struct rdac_pg_expanded expanded;
158 } mode_select;
159 u8 index;
160 u8 array_name[ARRAY_LABEL_LEN];
161 struct Scsi_Host *host;
162 spinlock_t ms_lock;
163 int ms_queued;
164 struct work_struct ms_work;
165 struct scsi_device *ms_sdev;
166 struct list_head ms_head;
167};
168
168struct c2_inquiry { 169struct c2_inquiry {
169 u8 peripheral_info; 170 u8 peripheral_info;
170 u8 page_code; /* 0xC2 */ 171 u8 page_code; /* 0xC2 */
@@ -369,16 +370,17 @@ static void release_controller(struct kref *kref)
369 kfree(ctlr); 370 kfree(ctlr);
370} 371}
371 372
372static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id, 373static struct rdac_controller *get_controller(int index, char *array_name,
373 char *array_name) 374 u8 *array_id, struct scsi_device *sdev)
374{ 375{
375 struct rdac_controller *ctlr, *tmp; 376 struct rdac_controller *ctlr, *tmp;
376 377
377 spin_lock(&list_lock); 378 spin_lock(&list_lock);
378 379
379 list_for_each_entry(tmp, &ctlr_list, node) { 380 list_for_each_entry(tmp, &ctlr_list, node) {
380 if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) && 381 if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
381 (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) { 382 (tmp->index == index) &&
383 (tmp->host == sdev->host)) {
382 kref_get(&tmp->kref); 384 kref_get(&tmp->kref);
383 spin_unlock(&list_lock); 385 spin_unlock(&list_lock);
384 return tmp; 386 return tmp;
@@ -389,16 +391,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
389 goto done; 391 goto done;
390 392
391 /* initialize fields of controller */ 393 /* initialize fields of controller */
392 memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN); 394 memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
393 memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN); 395 ctlr->index = index;
396 ctlr->host = sdev->host;
394 memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN); 397 memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
395 398
396 /* update the controller index */
397 if (slot_id[1] == 0x31)
398 ctlr->index = 0;
399 else
400 ctlr->index = 1;
401
402 kref_init(&ctlr->kref); 399 kref_init(&ctlr->kref);
403 ctlr->use_ms10 = -1; 400 ctlr->use_ms10 = -1;
404 ctlr->ms_queued = 0; 401 ctlr->ms_queued = 0;
@@ -444,7 +441,7 @@ done:
444} 441}
445 442
446static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, 443static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
447 char *array_name) 444 char *array_name, u8 *array_id)
448{ 445{
449 int err, i; 446 int err, i;
450 struct c8_inquiry *inqp; 447 struct c8_inquiry *inqp;
@@ -463,6 +460,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
463 *(array_name+i) = inqp->array_user_label[(2*i)+1]; 460 *(array_name+i) = inqp->array_user_label[(2*i)+1];
464 461
465 *(array_name+ARRAY_LABEL_LEN-1) = '\0'; 462 *(array_name+ARRAY_LABEL_LEN-1) = '\0';
463 memset(array_id, 0, UNIQUE_ID_LEN);
464 memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
466 } 465 }
467 return err; 466 return err;
468} 467}
@@ -504,16 +503,20 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
504} 503}
505 504
506static int initialize_controller(struct scsi_device *sdev, 505static int initialize_controller(struct scsi_device *sdev,
507 struct rdac_dh_data *h, char *array_name) 506 struct rdac_dh_data *h, char *array_name, u8 *array_id)
508{ 507{
509 int err; 508 int err, index;
510 struct c4_inquiry *inqp; 509 struct c4_inquiry *inqp;
511 510
512 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h); 511 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
513 if (err == SCSI_DH_OK) { 512 if (err == SCSI_DH_OK) {
514 inqp = &h->inq.c4; 513 inqp = &h->inq.c4;
515 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id, 514 /* get the controller index */
516 array_name); 515 if (inqp->slot_id[1] == 0x31)
516 index = 0;
517 else
518 index = 1;
519 h->ctlr = get_controller(index, array_name, array_id, sdev);
517 if (!h->ctlr) 520 if (!h->ctlr)
518 err = SCSI_DH_RES_TEMP_UNAVAIL; 521 err = SCSI_DH_RES_TEMP_UNAVAIL;
519 } 522 }
@@ -835,6 +838,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
835 unsigned long flags; 838 unsigned long flags;
836 int err; 839 int err;
837 char array_name[ARRAY_LABEL_LEN]; 840 char array_name[ARRAY_LABEL_LEN];
841 char array_id[UNIQUE_ID_LEN];
838 842
839 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) 843 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
840 + sizeof(*h) , GFP_KERNEL); 844 + sizeof(*h) , GFP_KERNEL);
@@ -849,11 +853,11 @@ static int rdac_bus_attach(struct scsi_device *sdev)
849 h->lun = UNINITIALIZED_LUN; 853 h->lun = UNINITIALIZED_LUN;
850 h->state = RDAC_STATE_ACTIVE; 854 h->state = RDAC_STATE_ACTIVE;
851 855
852 err = get_lun_info(sdev, h, array_name); 856 err = get_lun_info(sdev, h, array_name, array_id);
853 if (err != SCSI_DH_OK) 857 if (err != SCSI_DH_OK)
854 goto failed; 858 goto failed;
855 859
856 err = initialize_controller(sdev, h, array_name); 860 err = initialize_controller(sdev, h, array_name, array_id);
857 if (err != SCSI_DH_OK) 861 if (err != SCSI_DH_OK)
858 goto failed; 862 goto failed;
859 863
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 204fa8d4b4ab..ba710e350ac5 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -487,6 +487,19 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
487} 487}
488 488
489/** 489/**
490 * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
491 * @port: The FCoE port
492 * @skb: The FIP/FCoE packet to be sent
493 */
494static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
495{
496 if (port->fcoe_pending_queue.qlen)
497 fcoe_check_wait_queue(port->lport, skb);
498 else if (fcoe_start_io(skb))
499 fcoe_check_wait_queue(port->lport, skb);
500}
501
502/**
490 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame 503 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
491 * @fip: The FCoE controller 504 * @fip: The FCoE controller
492 * @skb: The FIP packet to be sent 505 * @skb: The FIP packet to be sent
@@ -494,7 +507,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
494static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) 507static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
495{ 508{
496 skb->dev = fcoe_from_ctlr(fip)->netdev; 509 skb->dev = fcoe_from_ctlr(fip)->netdev;
497 dev_queue_xmit(skb); 510 fcoe_port_send(lport_priv(fip->lp), skb);
498} 511}
499 512
500/** 513/**
@@ -1257,30 +1270,20 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
1257/** 1270/**
1258 * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming 1271 * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
1259 * command. 1272 * command.
1260 * @curr_cpu: CPU which received request
1261 * 1273 *
1262 * This routine selects next CPU based on cpumask. 1274 * This routine selects next CPU based on cpumask to distribute
1275 * incoming requests in round robin.
1263 * 1276 *
1264 * Returns: int (CPU number). Caller to verify if returned CPU is online or not. 1277 * Returns: int CPU number
1265 */ 1278 */
1266static unsigned int fcoe_select_cpu(unsigned int curr_cpu) 1279static inline unsigned int fcoe_select_cpu(void)
1267{ 1280{
1268 static unsigned int selected_cpu; 1281 static unsigned int selected_cpu;
1269 1282
1270 if (num_online_cpus() == 1) 1283 selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1271 return curr_cpu; 1284 if (selected_cpu >= nr_cpu_ids)
1272 /* 1285 selected_cpu = cpumask_first(cpu_online_mask);
1273 * Doing following check, to skip "curr_cpu (smp_processor_id)" 1286
1274 * from selection of CPU is intentional. This is to avoid same CPU
1275 * doing post-processing of command. "curr_cpu" to just receive
1276 * incoming request in case where rx_id is UNKNOWN and all other
1277 * CPU to actually process the command(s)
1278 */
1279 do {
1280 selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1281 if (selected_cpu >= nr_cpu_ids)
1282 selected_cpu = cpumask_first(cpu_online_mask);
1283 } while (selected_cpu == curr_cpu);
1284 return selected_cpu; 1287 return selected_cpu;
1285} 1288}
1286 1289
@@ -1350,30 +1353,26 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1350 1353
1351 fr = fcoe_dev_from_skb(skb); 1354 fr = fcoe_dev_from_skb(skb);
1352 fr->fr_dev = lport; 1355 fr->fr_dev = lport;
1353 fr->ptype = ptype;
1354 1356
1355 /* 1357 /*
1356 * In case the incoming frame's exchange is originated from 1358 * In case the incoming frame's exchange is originated from
1357 * the initiator, then received frame's exchange id is ANDed 1359 * the initiator, then received frame's exchange id is ANDed
1358 * with fc_cpu_mask bits to get the same cpu on which exchange 1360 * with fc_cpu_mask bits to get the same cpu on which exchange
1359 * was originated, otherwise just use the current cpu. 1361 * was originated, otherwise select cpu using rx exchange id
1362 * or fcoe_select_cpu().
1360 */ 1363 */
1361 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX) 1364 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1362 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; 1365 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1363 else { 1366 else {
1364 cpu = smp_processor_id(); 1367 if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
1365 1368 cpu = fcoe_select_cpu();
1366 if ((fh->fh_type == FC_TYPE_FCP) && 1369 else
1367 (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
1368 do {
1369 cpu = fcoe_select_cpu(cpu);
1370 } while (!cpu_online(cpu));
1371 } else if ((fh->fh_type == FC_TYPE_FCP) &&
1372 (ntohs(fh->fh_rx_id) != FC_XID_UNKNOWN)) {
1373 cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask; 1370 cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
1374 } else
1375 cpu = smp_processor_id();
1376 } 1371 }
1372
1373 if (cpu >= nr_cpu_ids)
1374 goto err;
1375
1377 fps = &per_cpu(fcoe_percpu, cpu); 1376 fps = &per_cpu(fcoe_percpu, cpu);
1378 spin_lock_bh(&fps->fcoe_rx_list.lock); 1377 spin_lock_bh(&fps->fcoe_rx_list.lock);
1379 if (unlikely(!fps->thread)) { 1378 if (unlikely(!fps->thread)) {
@@ -1572,11 +1571,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1572 1571
1573 /* send down to lld */ 1572 /* send down to lld */
1574 fr_dev(fp) = lport; 1573 fr_dev(fp) = lport;
1575 if (port->fcoe_pending_queue.qlen) 1574 fcoe_port_send(port, skb);
1576 fcoe_check_wait_queue(lport, skb);
1577 else if (fcoe_start_io(skb))
1578 fcoe_check_wait_queue(lport, skb);
1579
1580 return 0; 1575 return 0;
1581} 1576}
1582 1577
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c6f99b1d2383..ec61bdb833ac 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1219,8 +1219,8 @@ static void complete_scsi_command(struct CommandList *cp)
1219 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); 1219 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1220 break; 1220 break;
1221 case CMD_UNSOLICITED_ABORT: 1221 case CMD_UNSOLICITED_ABORT:
1222 cmd->result = DID_RESET << 16; 1222 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1223 dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " 1223 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1224 "abort\n", cp); 1224 "abort\n", cp);
1225 break; 1225 break;
1226 case CMD_TIMEOUT: 1226 case CMD_TIMEOUT:
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6d8dcd4dd06b..7f53ceaa7239 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -214,7 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h,
214 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, 214 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
215 c->Header.Tag.lower); 215 c->Header.Tag.lower);
216 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 216 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
217 (void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); 217 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
218 h->commands_outstanding++; 218 h->commands_outstanding++;
219 if (h->commands_outstanding > h->max_outstanding) 219 if (h->commands_outstanding > h->max_outstanding)
220 h->max_outstanding = h->commands_outstanding; 220 h->max_outstanding = h->commands_outstanding;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 888086c4e709..8d636301e32c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8778,14 +8778,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8778 if (rc != PCIBIOS_SUCCESSFUL) { 8778 if (rc != PCIBIOS_SUCCESSFUL) {
8779 dev_err(&pdev->dev, "Failed to save PCI config space\n"); 8779 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8780 rc = -EIO; 8780 rc = -EIO;
8781 goto cleanup_nomem; 8781 goto out_msi_disable;
8782 } 8782 }
8783 8783
8784 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) 8784 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8785 goto cleanup_nomem; 8785 goto out_msi_disable;
8786 8786
8787 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 8787 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8788 goto cleanup_nomem; 8788 goto out_msi_disable;
8789 8789
8790 if (ioa_cfg->sis64) 8790 if (ioa_cfg->sis64)
8791 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) 8791 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
@@ -8800,7 +8800,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8800 if (rc < 0) { 8800 if (rc < 0) {
8801 dev_err(&pdev->dev, 8801 dev_err(&pdev->dev,
8802 "Couldn't allocate enough memory for device driver!\n"); 8802 "Couldn't allocate enough memory for device driver!\n");
8803 goto cleanup_nomem; 8803 goto out_msi_disable;
8804 } 8804 }
8805 8805
8806 /* 8806 /*
@@ -8845,10 +8845,10 @@ out:
8845 8845
8846cleanup_nolog: 8846cleanup_nolog:
8847 ipr_free_mem(ioa_cfg); 8847 ipr_free_mem(ioa_cfg);
8848cleanup_nomem:
8849 iounmap(ipr_regs);
8850out_msi_disable: 8848out_msi_disable:
8851 pci_disable_msi(pdev); 8849 pci_disable_msi(pdev);
8850cleanup_nomem:
8851 iounmap(ipr_regs);
8852out_release_regions: 8852out_release_regions:
8853 pci_release_regions(pdev); 8853 pci_release_regions(pdev);
8854out_scsi_host_put: 8854out_scsi_host_put:
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index f5a0665b6773..01ff082dc34c 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -802,10 +802,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
802 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); 802 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
803 spin_lock_bh(&pool->lock); 803 spin_lock_bh(&pool->lock);
804 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); 804 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
805 if (ep) { 805 if (ep && ep->xid == xid)
806 fc_exch_hold(ep); 806 fc_exch_hold(ep);
807 WARN_ON(ep->xid != xid);
808 }
809 spin_unlock_bh(&pool->lock); 807 spin_unlock_bh(&pool->lock);
810 } 808 }
811 return ep; 809 return ep;
@@ -2465,8 +2463,11 @@ int fc_setup_exch_mgr(void)
2465 2463
2466 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); 2464 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2467 if (!fc_exch_workqueue) 2465 if (!fc_exch_workqueue)
2468 return -ENOMEM; 2466 goto err;
2469 return 0; 2467 return 0;
2468err:
2469 kmem_cache_destroy(fc_em_cachep);
2470 return -ENOMEM;
2470} 2471}
2471 2472
2472/** 2473/**
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 9cd2149519ac..afb63c843144 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -498,7 +498,7 @@ crc_err:
498 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 498 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
499 stats->ErrorFrames++; 499 stats->ErrorFrames++;
500 /* per cpu count, not total count, but OK for limit */ 500 /* per cpu count, not total count, but OK for limit */
501 if (stats->InvalidCRCCount++ < 5) 501 if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
502 printk(KERN_WARNING "libfc: CRC error on data " 502 printk(KERN_WARNING "libfc: CRC error on data "
503 "frame for port (%6.6x)\n", 503 "frame for port (%6.6x)\n",
504 lport->port_id); 504 lport->port_id);
@@ -690,7 +690,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
690} 690}
691 691
692/** 692/**
693 * fc_fcp_abts_resp() - Send an ABTS response 693 * fc_fcp_abts_resp() - Receive an ABTS response
694 * @fsp: The FCP packet that is being aborted 694 * @fsp: The FCP packet that is being aborted
695 * @fp: The response frame 695 * @fp: The response frame
696 */ 696 */
@@ -730,7 +730,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
730} 730}
731 731
732/** 732/**
733 * fc_fcp_recv() - Reveive an FCP frame 733 * fc_fcp_recv() - Receive an FCP frame
734 * @seq: The sequence the frame is on 734 * @seq: The sequence the frame is on
735 * @fp: The received frame 735 * @fp: The received frame
736 * @arg: The related FCP packet 736 * @arg: The related FCP packet
@@ -1084,6 +1084,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1084 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); 1084 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
1085 if (unlikely(rc)) { 1085 if (unlikely(rc)) {
1086 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1086 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1087 fsp->cmd->SCp.ptr = NULL;
1087 list_del(&fsp->list); 1088 list_del(&fsp->list);
1088 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1089 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1089 } 1090 }
@@ -1645,12 +1646,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1645 struct fc_seq *seq; 1646 struct fc_seq *seq;
1646 struct fcp_srr *srr; 1647 struct fcp_srr *srr;
1647 struct fc_frame *fp; 1648 struct fc_frame *fp;
1648 u8 cdb_op;
1649 unsigned int rec_tov; 1649 unsigned int rec_tov;
1650 1650
1651 rport = fsp->rport; 1651 rport = fsp->rport;
1652 rpriv = rport->dd_data; 1652 rpriv = rport->dd_data;
1653 cdb_op = fsp->cdb_cmd.fc_cdb[0];
1654 1653
1655 if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || 1654 if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
1656 rpriv->rp_state != RPORT_ST_READY) 1655 rpriv->rp_state != RPORT_ST_READY)
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e008b1673507..e55ed9cf23fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1352,7 +1352,6 @@ static void fc_lport_timeout(struct work_struct *work)
1352 WARN_ON(1); 1352 WARN_ON(1);
1353 break; 1353 break;
1354 case LPORT_ST_READY: 1354 case LPORT_ST_READY:
1355 WARN_ON(1);
1356 break; 1355 break;
1357 case LPORT_ST_RESET: 1356 case LPORT_ST_RESET:
1358 break; 1357 break;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 874e29d9533f..f84084bba2f0 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -849,6 +849,9 @@ static struct domain_device *sas_ex_discover_expander(
849 849
850 res = sas_discover_expander(child); 850 res = sas_discover_expander(child);
851 if (res) { 851 if (res) {
852 spin_lock_irq(&parent->port->dev_list_lock);
853 list_del(&child->dev_list_node);
854 spin_unlock_irq(&parent->port->dev_list_lock);
852 kfree(child); 855 kfree(child);
853 return NULL; 856 return NULL;
854 } 857 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8ec2c86a49d4..c088a36d1f33 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -20,6 +20,11 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <scsi/scsi_host.h> 22#include <scsi/scsi_host.h>
23
24#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
25#define CONFIG_SCSI_LPFC_DEBUG_FS
26#endif
27
23struct lpfc_sli2_slim; 28struct lpfc_sli2_slim;
24 29
25#define LPFC_PCI_DEV_LP 0x1 30#define LPFC_PCI_DEV_LP 0x1
@@ -465,9 +470,10 @@ enum intr_type_t {
465struct unsol_rcv_ct_ctx { 470struct unsol_rcv_ct_ctx {
466 uint32_t ctxt_id; 471 uint32_t ctxt_id;
467 uint32_t SID; 472 uint32_t SID;
468 uint32_t oxid;
469 uint32_t flags; 473 uint32_t flags;
470#define UNSOL_VALID 0x00000001 474#define UNSOL_VALID 0x00000001
475 uint16_t oxid;
476 uint16_t rxid;
471}; 477};
472 478
473#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/ 479#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/
@@ -674,6 +680,9 @@ struct lpfc_hba {
674 uint32_t cfg_enable_rrq; 680 uint32_t cfg_enable_rrq;
675 uint32_t cfg_topology; 681 uint32_t cfg_topology;
676 uint32_t cfg_link_speed; 682 uint32_t cfg_link_speed;
683#define LPFC_FCF_FOV 1 /* Fast fcf failover */
684#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
685 uint32_t cfg_fcf_failover_policy;
677 uint32_t cfg_cr_delay; 686 uint32_t cfg_cr_delay;
678 uint32_t cfg_cr_count; 687 uint32_t cfg_cr_count;
679 uint32_t cfg_multi_ring_support; 688 uint32_t cfg_multi_ring_support;
@@ -845,9 +854,13 @@ struct lpfc_hba {
845 /* iDiag debugfs sub-directory */ 854 /* iDiag debugfs sub-directory */
846 struct dentry *idiag_root; 855 struct dentry *idiag_root;
847 struct dentry *idiag_pci_cfg; 856 struct dentry *idiag_pci_cfg;
857 struct dentry *idiag_bar_acc;
848 struct dentry *idiag_que_info; 858 struct dentry *idiag_que_info;
849 struct dentry *idiag_que_acc; 859 struct dentry *idiag_que_acc;
850 struct dentry *idiag_drb_acc; 860 struct dentry *idiag_drb_acc;
861 struct dentry *idiag_ctl_acc;
862 struct dentry *idiag_mbx_acc;
863 struct dentry *idiag_ext_acc;
851#endif 864#endif
852 865
853 /* Used for deferred freeing of ELS data buffers */ 866 /* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 135a53baa735..2542f1f8bf86 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,47 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
755} 755}
756 756
757/** 757/**
758 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness
759 * @phba: lpfc_hba pointer.
760 *
761 * Description:
762 * SLI4 interface type-2 device to wait on the sliport status register for
763 * the readyness after performing a firmware reset.
764 *
765 * Returns:
766 * zero for success
767 **/
768static int
769lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
770{
771 struct lpfc_register portstat_reg;
772 int i;
773
774
775 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
776 &portstat_reg.word0);
777
778 /* wait for the SLI port firmware ready after firmware reset */
779 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
780 msleep(10);
781 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
782 &portstat_reg.word0);
783 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
784 continue;
785 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
786 continue;
787 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
788 continue;
789 break;
790 }
791
792 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
793 return 0;
794 else
795 return -EIO;
796}
797
798/**
758 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc 799 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
759 * @phba: lpfc_hba pointer. 800 * @phba: lpfc_hba pointer.
760 * 801 *
@@ -769,6 +810,7 @@ static ssize_t
769lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) 810lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
770{ 811{
771 struct completion online_compl; 812 struct completion online_compl;
813 struct pci_dev *pdev = phba->pcidev;
772 uint32_t reg_val; 814 uint32_t reg_val;
773 int status = 0; 815 int status = 0;
774 int rc; 816 int rc;
@@ -781,6 +823,14 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
781 LPFC_SLI_INTF_IF_TYPE_2)) 823 LPFC_SLI_INTF_IF_TYPE_2))
782 return -EPERM; 824 return -EPERM;
783 825
826 if (!pdev->is_physfn)
827 return -EPERM;
828
829 /* Disable SR-IOV virtual functions if enabled */
830 if (phba->cfg_sriov_nr_virtfn) {
831 pci_disable_sriov(pdev);
832 phba->cfg_sriov_nr_virtfn = 0;
833 }
784 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 834 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
785 835
786 if (status != 0) 836 if (status != 0)
@@ -805,7 +855,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
805 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 855 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
806 856
807 /* delay driver action following IF_TYPE_2 reset */ 857 /* delay driver action following IF_TYPE_2 reset */
808 msleep(100); 858 rc = lpfc_sli4_pdev_status_reg_wait(phba);
859
860 if (rc)
861 return -EIO;
809 862
810 init_completion(&online_compl); 863 init_completion(&online_compl);
811 rc = lpfc_workq_post_event(phba, &status, &online_compl, 864 rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -895,6 +948,10 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
895 948
896 if (!phba->cfg_enable_hba_reset) 949 if (!phba->cfg_enable_hba_reset)
897 return -EACCES; 950 return -EACCES;
951
952 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
953 "3050 lpfc_board_mode set to %s\n", buf);
954
898 init_completion(&online_compl); 955 init_completion(&online_compl);
899 956
900 if(strncmp(buf, "online", sizeof("online") - 1) == 0) { 957 if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
@@ -1290,6 +1347,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
1290 if (phba->sli_rev == LPFC_SLI_REV4) 1347 if (phba->sli_rev == LPFC_SLI_REV4)
1291 val = 0; 1348 val = 0;
1292 1349
1350 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1351 "3051 lpfc_poll changed from %d to %d\n",
1352 phba->cfg_poll, val);
1353
1293 spin_lock_irq(&phba->hbalock); 1354 spin_lock_irq(&phba->hbalock);
1294 1355
1295 old_val = phba->cfg_poll; 1356 old_val = phba->cfg_poll;
@@ -1414,80 +1475,10 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
1414 struct Scsi_Host *shost = class_to_shost(dev); 1475 struct Scsi_Host *shost = class_to_shost(dev);
1415 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1476 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1416 struct lpfc_hba *phba = vport->phba; 1477 struct lpfc_hba *phba = vport->phba;
1417 struct pci_dev *pdev = phba->pcidev; 1478 uint16_t max_nr_virtfn;
1418 union lpfc_sli4_cfg_shdr *shdr;
1419 uint32_t shdr_status, shdr_add_status;
1420 LPFC_MBOXQ_t *mboxq;
1421 struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
1422 struct lpfc_rsrc_desc_pcie *desc;
1423 uint32_t max_nr_virtfn;
1424 uint32_t desc_count;
1425 int length, rc, i;
1426
1427 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1428 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1429 LPFC_SLI_INTF_IF_TYPE_2))
1430 return -EPERM;
1431
1432 if (!pdev->is_physfn)
1433 return snprintf(buf, PAGE_SIZE, "%d\n", 0);
1434
1435 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1436 if (!mboxq)
1437 return -ENOMEM;
1438
1439 /* get the maximum number of virtfn support by physfn */
1440 length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
1441 sizeof(struct lpfc_sli4_cfg_mhdr));
1442 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
1443 LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
1444 length, LPFC_SLI4_MBX_EMBED);
1445 shdr = (union lpfc_sli4_cfg_shdr *)
1446 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
1447 bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
1448 phba->sli4_hba.iov.pf_number + 1);
1449
1450 get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
1451 bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
1452 LPFC_CFG_TYPE_CURRENT_ACTIVE);
1453
1454 rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
1455 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
1456
1457 if (rc != MBX_TIMEOUT) {
1458 /* check return status */
1459 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1460 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1461 &shdr->response);
1462 if (shdr_status || shdr_add_status || rc)
1463 goto error_out;
1464
1465 } else
1466 goto error_out;
1467
1468 desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
1469
1470 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
1471 desc = (struct lpfc_rsrc_desc_pcie *)
1472 &get_prof_cfg->u.response.prof_cfg.desc[i];
1473 if (LPFC_RSRC_DESC_TYPE_PCIE ==
1474 bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
1475 max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
1476 desc);
1477 break;
1478 }
1479 }
1480
1481 if (i < LPFC_RSRC_DESC_MAX_NUM) {
1482 if (rc != MBX_TIMEOUT)
1483 mempool_free(mboxq, phba->mbox_mem_pool);
1484 return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
1485 }
1486 1479
1487error_out: 1480 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
1488 if (rc != MBX_TIMEOUT) 1481 return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
1489 mempool_free(mboxq, phba->mbox_mem_pool);
1490 return -EIO;
1491} 1482}
1492 1483
1493/** 1484/**
@@ -1605,6 +1596,9 @@ static int \
1605lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ 1596lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
1606{ \ 1597{ \
1607 if (val >= minval && val <= maxval) {\ 1598 if (val >= minval && val <= maxval) {\
1599 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
1600 "3052 lpfc_" #attr " changed from %d to %d\n", \
1601 phba->cfg_##attr, val); \
1608 phba->cfg_##attr = val;\ 1602 phba->cfg_##attr = val;\
1609 return 0;\ 1603 return 0;\
1610 }\ 1604 }\
@@ -1762,6 +1756,9 @@ static int \
1762lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ 1756lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
1763{ \ 1757{ \
1764 if (val >= minval && val <= maxval) {\ 1758 if (val >= minval && val <= maxval) {\
1759 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
1760 "3053 lpfc_" #attr " changed from %d to %d\n", \
1761 vport->cfg_##attr, val); \
1765 vport->cfg_##attr = val;\ 1762 vport->cfg_##attr = val;\
1766 return 0;\ 1763 return 0;\
1767 }\ 1764 }\
@@ -2196,6 +2193,9 @@ lpfc_param_show(enable_npiv);
2196lpfc_param_init(enable_npiv, 1, 0, 1); 2193lpfc_param_init(enable_npiv, 1, 0, 1);
2197static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); 2194static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
2198 2195
2196LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
2197 "FCF Fast failover=1 Priority failover=2");
2198
2199int lpfc_enable_rrq; 2199int lpfc_enable_rrq;
2200module_param(lpfc_enable_rrq, int, S_IRUGO); 2200module_param(lpfc_enable_rrq, int, S_IRUGO);
2201MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); 2201MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
@@ -2678,6 +2678,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
2678 if (nolip) 2678 if (nolip)
2679 return strlen(buf); 2679 return strlen(buf);
2680 2680
2681 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2682 "3054 lpfc_topology changed from %d to %d\n",
2683 prev_val, val);
2681 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 2684 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
2682 if (err) { 2685 if (err) {
2683 phba->cfg_topology = prev_val; 2686 phba->cfg_topology = prev_val;
@@ -3101,6 +3104,10 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
3101 if (sscanf(val_buf, "%i", &val) != 1) 3104 if (sscanf(val_buf, "%i", &val) != 1)
3102 return -EINVAL; 3105 return -EINVAL;
3103 3106
3107 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3108 "3055 lpfc_link_speed changed from %d to %d %s\n",
3109 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
3110
3104 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || 3111 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
3105 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || 3112 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
3106 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || 3113 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
@@ -3678,7 +3685,9 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
3678# - Default will result in registering capabilities for all profiles. 3685# - Default will result in registering capabilities for all profiles.
3679# 3686#
3680*/ 3687*/
3681unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION; 3688unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
3689 SHOST_DIX_TYPE0_PROTECTION |
3690 SHOST_DIX_TYPE1_PROTECTION;
3682 3691
3683module_param(lpfc_prot_mask, uint, S_IRUGO); 3692module_param(lpfc_prot_mask, uint, S_IRUGO);
3684MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); 3693MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
@@ -3769,6 +3778,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3769 &dev_attr_lpfc_fdmi_on, 3778 &dev_attr_lpfc_fdmi_on,
3770 &dev_attr_lpfc_max_luns, 3779 &dev_attr_lpfc_max_luns,
3771 &dev_attr_lpfc_enable_npiv, 3780 &dev_attr_lpfc_enable_npiv,
3781 &dev_attr_lpfc_fcf_failover_policy,
3772 &dev_attr_lpfc_enable_rrq, 3782 &dev_attr_lpfc_enable_rrq,
3773 &dev_attr_nport_evt_cnt, 3783 &dev_attr_nport_evt_cnt,
3774 &dev_attr_board_mode, 3784 &dev_attr_board_mode,
@@ -4989,6 +4999,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4989 lpfc_link_speed_init(phba, lpfc_link_speed); 4999 lpfc_link_speed_init(phba, lpfc_link_speed);
4990 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 5000 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
4991 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 5001 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
5002 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
4992 lpfc_enable_rrq_init(phba, lpfc_enable_rrq); 5003 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
4993 lpfc_use_msi_init(phba, lpfc_use_msi); 5004 lpfc_use_msi_init(phba, lpfc_use_msi);
4994 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 5005 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7fb0ba4cbfa7..6760c69f5253 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -42,6 +42,7 @@
42#include "lpfc.h" 42#include "lpfc.h"
43#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
44#include "lpfc_crtn.h" 44#include "lpfc_crtn.h"
45#include "lpfc_debugfs.h"
45#include "lpfc_vport.h" 46#include "lpfc_vport.h"
46#include "lpfc_version.h" 47#include "lpfc_version.h"
47 48
@@ -960,8 +961,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
960 evt_dat->immed_dat].oxid, 961 evt_dat->immed_dat].oxid,
961 phba->ct_ctx[ 962 phba->ct_ctx[
962 evt_dat->immed_dat].SID); 963 evt_dat->immed_dat].SID);
964 phba->ct_ctx[evt_dat->immed_dat].rxid =
965 piocbq->iocb.ulpContext;
963 phba->ct_ctx[evt_dat->immed_dat].oxid = 966 phba->ct_ctx[evt_dat->immed_dat].oxid =
964 piocbq->iocb.ulpContext; 967 piocbq->iocb.unsli3.rcvsli3.ox_id;
965 phba->ct_ctx[evt_dat->immed_dat].SID = 968 phba->ct_ctx[evt_dat->immed_dat].SID =
966 piocbq->iocb.un.rcvels.remoteID; 969 piocbq->iocb.un.rcvels.remoteID;
967 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID; 970 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
@@ -1312,7 +1315,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1312 rc = IOCB_ERROR; 1315 rc = IOCB_ERROR;
1313 goto issue_ct_rsp_exit; 1316 goto issue_ct_rsp_exit;
1314 } 1317 }
1315 icmd->ulpContext = phba->ct_ctx[tag].oxid; 1318 icmd->ulpContext = phba->ct_ctx[tag].rxid;
1319 icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1316 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); 1320 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1317 if (!ndlp) { 1321 if (!ndlp) {
1318 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 1322 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -1337,9 +1341,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1337 goto issue_ct_rsp_exit; 1341 goto issue_ct_rsp_exit;
1338 } 1342 }
1339 1343
1340 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1344 icmd->un.ulpWord[3] =
1341 if (phba->sli_rev == LPFC_SLI_REV4)
1342 icmd->ulpContext =
1343 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 1345 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1344 1346
1345 /* The exchange is done, mark the entry as invalid */ 1347 /* The exchange is done, mark the entry as invalid */
@@ -1351,8 +1353,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1351 1353
1352 /* Xmit CT response on exchange <xid> */ 1354 /* Xmit CT response on exchange <xid> */
1353 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1355 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1354 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n", 1356 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1355 icmd->ulpContext, icmd->ulpIoTag, phba->link_state); 1357 icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1356 1358
1357 ctiocb->iocb_cmpl = NULL; 1359 ctiocb->iocb_cmpl = NULL;
1358 ctiocb->iocb_flag |= LPFC_IO_LIBDFC; 1360 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
@@ -1471,13 +1473,12 @@ send_mgmt_rsp_exit:
1471/** 1473/**
1472 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode 1474 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1473 * @phba: Pointer to HBA context object. 1475 * @phba: Pointer to HBA context object.
1474 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1475 * 1476 *
1476 * This function is responsible for preparing driver for diag loopback 1477 * This function is responsible for preparing driver for diag loopback
1477 * on device. 1478 * on device.
1478 */ 1479 */
1479static int 1480static int
1480lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job) 1481lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1481{ 1482{
1482 struct lpfc_vport **vports; 1483 struct lpfc_vport **vports;
1483 struct Scsi_Host *shost; 1484 struct Scsi_Host *shost;
@@ -1521,7 +1522,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
1521/** 1522/**
1522 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode 1523 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1523 * @phba: Pointer to HBA context object. 1524 * @phba: Pointer to HBA context object.
1524 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1525 * 1525 *
1526 * This function is responsible for driver exit processing of setting up 1526 * This function is responsible for driver exit processing of setting up
1527 * diag loopback mode on device. 1527 * diag loopback mode on device.
@@ -1567,7 +1567,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1567 uint32_t link_flags; 1567 uint32_t link_flags;
1568 uint32_t timeout; 1568 uint32_t timeout;
1569 LPFC_MBOXQ_t *pmboxq; 1569 LPFC_MBOXQ_t *pmboxq;
1570 int mbxstatus; 1570 int mbxstatus = MBX_SUCCESS;
1571 int i = 0; 1571 int i = 0;
1572 int rc = 0; 1572 int rc = 0;
1573 1573
@@ -1586,7 +1586,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1586 goto job_error; 1586 goto job_error;
1587 } 1587 }
1588 1588
1589 rc = lpfc_bsg_diag_mode_enter(phba, job); 1589 rc = lpfc_bsg_diag_mode_enter(phba);
1590 if (rc) 1590 if (rc)
1591 goto job_error; 1591 goto job_error;
1592 1592
@@ -1741,7 +1741,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1741 uint32_t link_flags, timeout, req_len, alloc_len; 1741 uint32_t link_flags, timeout, req_len, alloc_len;
1742 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; 1742 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1743 LPFC_MBOXQ_t *pmboxq = NULL; 1743 LPFC_MBOXQ_t *pmboxq = NULL;
1744 int mbxstatus, i, rc = 0; 1744 int mbxstatus = MBX_SUCCESS, i, rc = 0;
1745 1745
1746 /* no data to return just the return code */ 1746 /* no data to return just the return code */
1747 job->reply->reply_payload_rcv_len = 0; 1747 job->reply->reply_payload_rcv_len = 0;
@@ -1758,7 +1758,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1758 goto job_error; 1758 goto job_error;
1759 } 1759 }
1760 1760
1761 rc = lpfc_bsg_diag_mode_enter(phba, job); 1761 rc = lpfc_bsg_diag_mode_enter(phba);
1762 if (rc) 1762 if (rc)
1763 goto job_error; 1763 goto job_error;
1764 1764
@@ -1982,7 +1982,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
1982 goto job_error; 1982 goto job_error;
1983 } 1983 }
1984 1984
1985 rc = lpfc_bsg_diag_mode_enter(phba, job); 1985 rc = lpfc_bsg_diag_mode_enter(phba);
1986 if (rc) 1986 if (rc)
1987 goto job_error; 1987 goto job_error;
1988 1988
@@ -3178,6 +3178,11 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3178 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3178 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3179 phba->mbox_ext_buf_ctx.nembType, 3179 phba->mbox_ext_buf_ctx.nembType,
3180 phba->mbox_ext_buf_ctx.mboxType, size); 3180 phba->mbox_ext_buf_ctx.mboxType, size);
3181 lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3182 phba->mbox_ext_buf_ctx.nembType,
3183 phba->mbox_ext_buf_ctx.mboxType,
3184 dma_ebuf, sta_pos_addr,
3185 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3181 } else 3186 } else
3182 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3187 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3183 3188
@@ -3430,6 +3435,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3430 "ext_buf_cnt:%d\n", ext_buf_cnt); 3435 "ext_buf_cnt:%d\n", ext_buf_cnt);
3431 } 3436 }
3432 3437
3438 /* before dma descriptor setup */
3439 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3440 sta_pre_addr, dmabuf, ext_buf_cnt);
3441
3433 /* reject non-embedded mailbox command with none external buffer */ 3442 /* reject non-embedded mailbox command with none external buffer */
3434 if (ext_buf_cnt == 0) { 3443 if (ext_buf_cnt == 0) {
3435 rc = -EPERM; 3444 rc = -EPERM;
@@ -3477,6 +3486,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3477 } 3486 }
3478 } 3487 }
3479 3488
3489 /* after dma descriptor setup */
3490 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3491 sta_pos_addr, dmabuf, ext_buf_cnt);
3492
3480 /* construct base driver mbox command */ 3493 /* construct base driver mbox command */
3481 pmb = &pmboxq->u.mb; 3494 pmb = &pmboxq->u.mb;
3482 pmbx = (uint8_t *)dmabuf->virt; 3495 pmbx = (uint8_t *)dmabuf->virt;
@@ -3511,7 +3524,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3511 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3524 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3512 "2947 Issued SLI_CONFIG ext-buffer " 3525 "2947 Issued SLI_CONFIG ext-buffer "
3513 "maibox command, rc:x%x\n", rc); 3526 "maibox command, rc:x%x\n", rc);
3514 return 1; 3527 return SLI_CONFIG_HANDLED;
3515 } 3528 }
3516 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3529 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3517 "2948 Failed to issue SLI_CONFIG ext-buffer " 3530 "2948 Failed to issue SLI_CONFIG ext-buffer "
@@ -3549,7 +3562,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3549 LPFC_MBOXQ_t *pmboxq = NULL; 3562 LPFC_MBOXQ_t *pmboxq = NULL;
3550 MAILBOX_t *pmb; 3563 MAILBOX_t *pmb;
3551 uint8_t *mbx; 3564 uint8_t *mbx;
3552 int rc = 0, i; 3565 int rc = SLI_CONFIG_NOT_HANDLED, i;
3553 3566
3554 mbox_req = 3567 mbox_req =
3555 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 3568 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
@@ -3591,12 +3604,20 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3591 "ext_buf_cnt:%d\n", ext_buf_cnt); 3604 "ext_buf_cnt:%d\n", ext_buf_cnt);
3592 } 3605 }
3593 3606
3607 /* before dma buffer descriptor setup */
3608 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3609 sta_pre_addr, dmabuf, ext_buf_cnt);
3610
3594 if (ext_buf_cnt == 0) 3611 if (ext_buf_cnt == 0)
3595 return -EPERM; 3612 return -EPERM;
3596 3613
3597 /* for the first external buffer */ 3614 /* for the first external buffer */
3598 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); 3615 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3599 3616
3617 /* after dma descriptor setup */
3618 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3619 sta_pos_addr, dmabuf, ext_buf_cnt);
3620
3600 /* log for looking forward */ 3621 /* log for looking forward */
3601 for (i = 1; i < ext_buf_cnt; i++) { 3622 for (i = 1; i < ext_buf_cnt; i++) {
3602 if (nemb_tp == nemb_mse) 3623 if (nemb_tp == nemb_mse)
@@ -3660,7 +3681,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3660 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3681 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3661 "2955 Issued SLI_CONFIG ext-buffer " 3682 "2955 Issued SLI_CONFIG ext-buffer "
3662 "maibox command, rc:x%x\n", rc); 3683 "maibox command, rc:x%x\n", rc);
3663 return 1; 3684 return SLI_CONFIG_HANDLED;
3664 } 3685 }
3665 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3686 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3666 "2956 Failed to issue SLI_CONFIG ext-buffer " 3687 "2956 Failed to issue SLI_CONFIG ext-buffer "
@@ -3668,6 +3689,11 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3668 rc = -EPIPE; 3689 rc = -EPIPE;
3669 } 3690 }
3670 3691
3692 /* wait for additoinal external buffers */
3693 job->reply->result = 0;
3694 job->job_done(job);
3695 return SLI_CONFIG_HANDLED;
3696
3671job_error: 3697job_error:
3672 if (pmboxq) 3698 if (pmboxq)
3673 mempool_free(pmboxq, phba->mbox_mem_pool); 3699 mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -3840,6 +3866,12 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
3840 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list, 3866 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
3841 struct lpfc_dmabuf, list); 3867 struct lpfc_dmabuf, list);
3842 list_del_init(&dmabuf->list); 3868 list_del_init(&dmabuf->list);
3869
3870 /* after dma buffer descriptor setup */
3871 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
3872 mbox_rd, dma_ebuf, sta_pos_addr,
3873 dmabuf, index);
3874
3843 pbuf = (uint8_t *)dmabuf->virt; 3875 pbuf = (uint8_t *)dmabuf->virt;
3844 job->reply->reply_payload_rcv_len = 3876 job->reply->reply_payload_rcv_len =
3845 sg_copy_from_buffer(job->reply_payload.sg_list, 3877 sg_copy_from_buffer(job->reply_payload.sg_list,
@@ -3922,6 +3954,11 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3922 dmabuf); 3954 dmabuf);
3923 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3955 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3924 3956
3957 /* after write dma buffer */
3958 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
3959 mbox_wr, dma_ebuf, sta_pos_addr,
3960 dmabuf, index);
3961
3925 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { 3962 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3926 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3963 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3927 "2968 SLI_CONFIG ext-buffer wr all %d " 3964 "2968 SLI_CONFIG ext-buffer wr all %d "
@@ -3959,7 +3996,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3959 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3996 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3960 "2969 Issued SLI_CONFIG ext-buffer " 3997 "2969 Issued SLI_CONFIG ext-buffer "
3961 "maibox command, rc:x%x\n", rc); 3998 "maibox command, rc:x%x\n", rc);
3962 return 1; 3999 return SLI_CONFIG_HANDLED;
3963 } 4000 }
3964 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4001 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3965 "2970 Failed to issue SLI_CONFIG ext-buffer " 4002 "2970 Failed to issue SLI_CONFIG ext-buffer "
@@ -4039,14 +4076,14 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4039 struct lpfc_dmabuf *dmabuf) 4076 struct lpfc_dmabuf *dmabuf)
4040{ 4077{
4041 struct dfc_mbox_req *mbox_req; 4078 struct dfc_mbox_req *mbox_req;
4042 int rc; 4079 int rc = SLI_CONFIG_NOT_HANDLED;
4043 4080
4044 mbox_req = 4081 mbox_req =
4045 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 4082 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4046 4083
4047 /* mbox command with/without single external buffer */ 4084 /* mbox command with/without single external buffer */
4048 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) 4085 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4049 return SLI_CONFIG_NOT_HANDLED; 4086 return rc;
4050 4087
4051 /* mbox command and first external buffer */ 4088 /* mbox command and first external buffer */
4052 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) { 4089 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
@@ -4249,7 +4286,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4249 * mailbox extension size 4286 * mailbox extension size
4250 */ 4287 */
4251 if ((transmit_length > receive_length) || 4288 if ((transmit_length > receive_length) ||
4252 (transmit_length > MAILBOX_EXT_SIZE)) { 4289 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4253 rc = -ERANGE; 4290 rc = -ERANGE;
4254 goto job_done; 4291 goto job_done;
4255 } 4292 }
@@ -4272,7 +4309,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4272 /* receive length cannot be greater than mailbox 4309 /* receive length cannot be greater than mailbox
4273 * extension size 4310 * extension size
4274 */ 4311 */
4275 if (receive_length > MAILBOX_EXT_SIZE) { 4312 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4276 rc = -ERANGE; 4313 rc = -ERANGE;
4277 goto job_done; 4314 goto job_done;
4278 } 4315 }
@@ -4306,7 +4343,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4306 bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; 4343 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4307 4344
4308 /* bde size cannot be greater than mailbox ext size */ 4345 /* bde size cannot be greater than mailbox ext size */
4309 if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) { 4346 if (bde->tus.f.bdeSize >
4347 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4310 rc = -ERANGE; 4348 rc = -ERANGE;
4311 goto job_done; 4349 goto job_done;
4312 } 4350 }
@@ -4332,7 +4370,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4332 * mailbox extension size 4370 * mailbox extension size
4333 */ 4371 */
4334 if ((receive_length == 0) || 4372 if ((receive_length == 0) ||
4335 (receive_length > MAILBOX_EXT_SIZE)) { 4373 (receive_length >
4374 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4336 rc = -ERANGE; 4375 rc = -ERANGE;
4337 goto job_done; 4376 goto job_done;
4338 } 4377 }
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fc20c247f36b..a6db6aef1331 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -235,9 +235,11 @@ int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
235void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); 235void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
236void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); 236void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
237uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); 237uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
238void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
238int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); 239int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
239void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); 240void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
240int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); 241int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
242void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
241 243
242int lpfc_mem_alloc(struct lpfc_hba *, int align); 244int lpfc_mem_alloc(struct lpfc_hba *, int align);
243void lpfc_mem_free(struct lpfc_hba *); 245void lpfc_mem_free(struct lpfc_hba *);
@@ -371,6 +373,10 @@ extern struct lpfc_hbq_init *lpfc_hbq_defs[];
371/* SLI4 if_type 2 externs. */ 373/* SLI4 if_type 2 externs. */
372int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *); 374int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
373int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *); 375int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
376int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
377 uint16_t *, uint16_t *);
378int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
379 uint16_t *, uint16_t *);
374 380
375/* externs BlockGuard */ 381/* externs BlockGuard */
376extern char *_dump_buf_data; 382extern char *_dump_buf_data;
@@ -432,10 +438,16 @@ void lpfc_handle_rrq_active(struct lpfc_hba *);
432int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); 438int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
433int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, 439int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
434 uint16_t, uint16_t, uint16_t); 440 uint16_t, uint16_t, uint16_t);
441uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
435void lpfc_cleanup_wt_rrqs(struct lpfc_hba *); 442void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
436void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); 443void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
437struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, 444struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
438 uint32_t); 445 uint32_t);
446void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type,
447 enum mbox_type, enum dma_type, enum sta_type,
448 struct lpfc_dmabuf *, uint32_t);
449void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *);
439int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *); 450int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
440/* functions to support SR-IOV */ 451/* functions to support SR-IOV */
441int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int); 452int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
453uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 30b25c5fdd7e..a0424dd90e40 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -48,6 +48,7 @@
48#include "lpfc_version.h" 48#include "lpfc_version.h"
49#include "lpfc_compat.h" 49#include "lpfc_compat.h"
50#include "lpfc_debugfs.h" 50#include "lpfc_debugfs.h"
51#include "lpfc_bsg.h"
51 52
52#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 53#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
53/* 54/*
@@ -135,7 +136,11 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
135 int i, index, len, enable; 136 int i, index, len, enable;
136 uint32_t ms; 137 uint32_t ms;
137 struct lpfc_debugfs_trc *dtp; 138 struct lpfc_debugfs_trc *dtp;
138 char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; 139 char *buffer;
140
141 buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
142 if (!buffer)
143 return 0;
139 144
140 enable = lpfc_debugfs_enable; 145 enable = lpfc_debugfs_enable;
141 lpfc_debugfs_enable = 0; 146 lpfc_debugfs_enable = 0;
@@ -167,6 +172,8 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
167 } 172 }
168 173
169 lpfc_debugfs_enable = enable; 174 lpfc_debugfs_enable = enable;
175 kfree(buffer);
176
170 return len; 177 return len;
171} 178}
172 179
@@ -195,8 +202,11 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
195 int i, index, len, enable; 202 int i, index, len, enable;
196 uint32_t ms; 203 uint32_t ms;
197 struct lpfc_debugfs_trc *dtp; 204 struct lpfc_debugfs_trc *dtp;
198 char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; 205 char *buffer;
199 206
207 buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
208 if (!buffer)
209 return 0;
200 210
201 enable = lpfc_debugfs_enable; 211 enable = lpfc_debugfs_enable;
202 lpfc_debugfs_enable = 0; 212 lpfc_debugfs_enable = 0;
@@ -228,6 +238,8 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
228 } 238 }
229 239
230 lpfc_debugfs_enable = enable; 240 lpfc_debugfs_enable = enable;
241 kfree(buffer);
242
231 return len; 243 return len;
232} 244}
233 245
@@ -378,7 +390,11 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
378 int len = 0; 390 int len = 0;
379 int i, off; 391 int i, off;
380 uint32_t *ptr; 392 uint32_t *ptr;
381 char buffer[1024]; 393 char *buffer;
394
395 buffer = kmalloc(1024, GFP_KERNEL);
396 if (!buffer)
397 return 0;
382 398
383 off = 0; 399 off = 0;
384 spin_lock_irq(&phba->hbalock); 400 spin_lock_irq(&phba->hbalock);
@@ -407,6 +423,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
407 } 423 }
408 424
409 spin_unlock_irq(&phba->hbalock); 425 spin_unlock_irq(&phba->hbalock);
426 kfree(buffer);
427
410 return len; 428 return len;
411} 429}
412 430
@@ -1327,8 +1345,8 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
1327 return 0; 1345 return 0;
1328 1346
1329 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { 1347 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
1330 where = idiag.cmd.data[0]; 1348 where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
1331 count = idiag.cmd.data[1]; 1349 count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
1332 } else 1350 } else
1333 return 0; 1351 return 0;
1334 1352
@@ -1373,6 +1391,11 @@ pcicfg_browse:
1373 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, 1391 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
1374 "%08x ", u32val); 1392 "%08x ", u32val);
1375 offset += sizeof(uint32_t); 1393 offset += sizeof(uint32_t);
1394 if (offset >= LPFC_PCI_CFG_SIZE) {
1395 len += snprintf(pbuffer+len,
1396 LPFC_PCI_CFG_SIZE-len, "\n");
1397 break;
1398 }
1376 index -= sizeof(uint32_t); 1399 index -= sizeof(uint32_t);
1377 if (!index) 1400 if (!index)
1378 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, 1401 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
@@ -1385,8 +1408,11 @@ pcicfg_browse:
1385 } 1408 }
1386 1409
1387 /* Set up the offset for next portion of pci cfg read */ 1410 /* Set up the offset for next portion of pci cfg read */
1388 idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; 1411 if (index == 0) {
1389 if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) 1412 idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
1413 if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
1414 idiag.offset.last_rd = 0;
1415 } else
1390 idiag.offset.last_rd = 0; 1416 idiag.offset.last_rd = 0;
1391 1417
1392 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 1418 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
@@ -1439,8 +1465,8 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1439 if (rc != LPFC_PCI_CFG_RD_CMD_ARG) 1465 if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
1440 goto error_out; 1466 goto error_out;
1441 /* Read command from PCI config space, set up command fields */ 1467 /* Read command from PCI config space, set up command fields */
1442 where = idiag.cmd.data[0]; 1468 where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
1443 count = idiag.cmd.data[1]; 1469 count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
1444 if (count == LPFC_PCI_CFG_BROWSE) { 1470 if (count == LPFC_PCI_CFG_BROWSE) {
1445 if (where % sizeof(uint32_t)) 1471 if (where % sizeof(uint32_t))
1446 goto error_out; 1472 goto error_out;
@@ -1475,9 +1501,9 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1475 if (rc != LPFC_PCI_CFG_WR_CMD_ARG) 1501 if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
1476 goto error_out; 1502 goto error_out;
1477 /* Write command to PCI config space, read-modify-write */ 1503 /* Write command to PCI config space, read-modify-write */
1478 where = idiag.cmd.data[0]; 1504 where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
1479 count = idiag.cmd.data[1]; 1505 count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
1480 value = idiag.cmd.data[2]; 1506 value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX];
1481 /* Sanity checks */ 1507 /* Sanity checks */
1482 if ((count != sizeof(uint8_t)) && 1508 if ((count != sizeof(uint8_t)) &&
1483 (count != sizeof(uint16_t)) && 1509 (count != sizeof(uint16_t)) &&
@@ -1570,6 +1596,292 @@ error_out:
1570} 1596}
1571 1597
1572/** 1598/**
1599 * lpfc_idiag_baracc_read - idiag debugfs pci bar access read
1600 * @file: The file pointer to read from.
1601 * @buf: The buffer to copy the data to.
1602 * @nbytes: The number of bytes to read.
1603 * @ppos: The position in the file to start reading from.
1604 *
1605 * Description:
1606 * This routine reads data from the @phba pci bar memory mapped space
1607 * according to the idiag command, and copies to user @buf.
1608 *
1609 * Returns:
1610 * This function returns the amount of data that was read (this could be less
1611 * than @nbytes if the end of the file was reached) or a negative error value.
1612 **/
1613static ssize_t
1614lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
1615 loff_t *ppos)
1616{
1617 struct lpfc_debug *debug = file->private_data;
1618 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
1619 int offset_label, offset, offset_run, len = 0, index;
1620 int bar_num, acc_range, bar_size;
1621 char *pbuffer;
1622 void __iomem *mem_mapped_bar;
1623 uint32_t if_type;
1624 struct pci_dev *pdev;
1625 uint32_t u32val;
1626
1627 pdev = phba->pcidev;
1628 if (!pdev)
1629 return 0;
1630
1631 /* This is a user read operation */
1632 debug->op = LPFC_IDIAG_OP_RD;
1633
1634 if (!debug->buffer)
1635 debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL);
1636 if (!debug->buffer)
1637 return 0;
1638 pbuffer = debug->buffer;
1639
1640 if (*ppos)
1641 return 0;
1642
1643 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
1644 bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
1645 offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
1646 acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
1647 bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
1648 } else
1649 return 0;
1650
1651 if (acc_range == 0)
1652 return 0;
1653
1654 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1655 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
1656 if (bar_num == IDIAG_BARACC_BAR_0)
1657 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1658 else if (bar_num == IDIAG_BARACC_BAR_1)
1659 mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
1660 else if (bar_num == IDIAG_BARACC_BAR_2)
1661 mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
1662 else
1663 return 0;
1664 } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
1665 if (bar_num == IDIAG_BARACC_BAR_0)
1666 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1667 else
1668 return 0;
1669 } else
1670 return 0;
1671
1672 /* Read single PCI bar space register */
1673 if (acc_range == SINGLE_WORD) {
1674 offset_run = offset;
1675 u32val = readl(mem_mapped_bar + offset_run);
1676 len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
1677 "%05x: %08x\n", offset_run, u32val);
1678 } else
1679 goto baracc_browse;
1680
1681 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1682
1683baracc_browse:
1684
1685 /* Browse all PCI bar space registers */
1686 offset_label = idiag.offset.last_rd;
1687 offset_run = offset_label;
1688
1689 /* Read PCI bar memory mapped space */
1690 len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
1691 "%05x: ", offset_label);
1692 index = LPFC_PCI_BAR_RD_SIZE;
1693 while (index > 0) {
1694 u32val = readl(mem_mapped_bar + offset_run);
1695 len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
1696 "%08x ", u32val);
1697 offset_run += sizeof(uint32_t);
1698 if (acc_range == LPFC_PCI_BAR_BROWSE) {
1699 if (offset_run >= bar_size) {
1700 len += snprintf(pbuffer+len,
1701 LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
1702 break;
1703 }
1704 } else {
1705 if (offset_run >= offset +
1706 (acc_range * sizeof(uint32_t))) {
1707 len += snprintf(pbuffer+len,
1708 LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
1709 break;
1710 }
1711 }
1712 index -= sizeof(uint32_t);
1713 if (!index)
1714 len += snprintf(pbuffer+len,
1715 LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
1716 else if (!(index % (8 * sizeof(uint32_t)))) {
1717 offset_label += (8 * sizeof(uint32_t));
1718 len += snprintf(pbuffer+len,
1719 LPFC_PCI_BAR_RD_BUF_SIZE-len,
1720 "\n%05x: ", offset_label);
1721 }
1722 }
1723
1724 /* Set up the offset for next portion of pci bar read */
1725 if (index == 0) {
1726 idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE;
1727 if (acc_range == LPFC_PCI_BAR_BROWSE) {
1728 if (idiag.offset.last_rd >= bar_size)
1729 idiag.offset.last_rd = 0;
1730 } else {
1731 if (offset_run >= offset +
1732 (acc_range * sizeof(uint32_t)))
1733 idiag.offset.last_rd = offset;
1734 }
1735 } else {
1736 if (acc_range == LPFC_PCI_BAR_BROWSE)
1737 idiag.offset.last_rd = 0;
1738 else
1739 idiag.offset.last_rd = offset;
1740 }
1741
1742 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1743}
1744
1745/**
1746 * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands
1747 * @file: The file pointer to read from.
1748 * @buf: The buffer to copy the user data from.
1749 * @nbytes: The number of bytes to get.
1750 * @ppos: The position in the file to start reading from.
1751 *
1752 * This routine get the debugfs idiag command struct from user space and
1753 * then perform the syntax check for PCI bar memory mapped space read or
1754 * write command accordingly. In the case of PCI bar memory mapped space
1755 * read command, it sets up the command in the idiag command struct for
1756 * the debugfs read operation. In the case of PCI bar memorpy mapped space
1757 * write operation, it executes the write operation into the PCI bar memory
1758 * mapped space accordingly.
1759 *
1760 * It returns the @nbytges passing in from debugfs user space when successful.
1761 * In case of error conditions, it returns proper error code back to the user
1762 * space.
1763 */
1764static ssize_t
1765lpfc_idiag_baracc_write(struct file *file, const char __user *buf,
1766 size_t nbytes, loff_t *ppos)
1767{
1768 struct lpfc_debug *debug = file->private_data;
1769 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
1770 uint32_t bar_num, bar_size, offset, value, acc_range;
1771 struct pci_dev *pdev;
1772 void __iomem *mem_mapped_bar;
1773 uint32_t if_type;
1774 uint32_t u32val;
1775 int rc;
1776
1777 pdev = phba->pcidev;
1778 if (!pdev)
1779 return -EFAULT;
1780
1781 /* This is a user write operation */
1782 debug->op = LPFC_IDIAG_OP_WR;
1783
1784 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
1785 if (rc < 0)
1786 return rc;
1787
1788 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1789 bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
1790
1791 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
1792 if ((bar_num != IDIAG_BARACC_BAR_0) &&
1793 (bar_num != IDIAG_BARACC_BAR_1) &&
1794 (bar_num != IDIAG_BARACC_BAR_2))
1795 goto error_out;
1796 } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
1797 if (bar_num != IDIAG_BARACC_BAR_0)
1798 goto error_out;
1799 } else
1800 goto error_out;
1801
1802 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
1803 if (bar_num == IDIAG_BARACC_BAR_0) {
1804 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1805 LPFC_PCI_IF0_BAR0_SIZE;
1806 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1807 } else if (bar_num == IDIAG_BARACC_BAR_1) {
1808 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1809 LPFC_PCI_IF0_BAR1_SIZE;
1810 mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
1811 } else if (bar_num == IDIAG_BARACC_BAR_2) {
1812 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1813 LPFC_PCI_IF0_BAR2_SIZE;
1814 mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
1815 } else
1816 goto error_out;
1817 } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
1818 if (bar_num == IDIAG_BARACC_BAR_0) {
1819 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1820 LPFC_PCI_IF2_BAR0_SIZE;
1821 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1822 } else
1823 goto error_out;
1824 } else
1825 goto error_out;
1826
1827 offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
1828 if (offset % sizeof(uint32_t))
1829 goto error_out;
1830
1831 bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
1832 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
1833 /* Sanity check on PCI config read command line arguments */
1834 if (rc != LPFC_PCI_BAR_RD_CMD_ARG)
1835 goto error_out;
1836 acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
1837 if (acc_range == LPFC_PCI_BAR_BROWSE) {
1838 if (offset > bar_size - sizeof(uint32_t))
1839 goto error_out;
1840 /* Starting offset to browse */
1841 idiag.offset.last_rd = offset;
1842 } else if (acc_range > SINGLE_WORD) {
1843 if (offset + acc_range * sizeof(uint32_t) > bar_size)
1844 goto error_out;
1845 /* Starting offset to browse */
1846 idiag.offset.last_rd = offset;
1847 } else if (acc_range != SINGLE_WORD)
1848 goto error_out;
1849 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR ||
1850 idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST ||
1851 idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
1852 /* Sanity check on PCI bar write command line arguments */
1853 if (rc != LPFC_PCI_BAR_WR_CMD_ARG)
1854 goto error_out;
1855 /* Write command to PCI bar space, read-modify-write */
1856 acc_range = SINGLE_WORD;
1857 value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX];
1858 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) {
1859 writel(value, mem_mapped_bar + offset);
1860 readl(mem_mapped_bar + offset);
1861 }
1862 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) {
1863 u32val = readl(mem_mapped_bar + offset);
1864 u32val |= value;
1865 writel(u32val, mem_mapped_bar + offset);
1866 readl(mem_mapped_bar + offset);
1867 }
1868 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
1869 u32val = readl(mem_mapped_bar + offset);
1870 u32val &= ~value;
1871 writel(u32val, mem_mapped_bar + offset);
1872 readl(mem_mapped_bar + offset);
1873 }
1874 } else
1875 /* All other opecodes are illegal for now */
1876 goto error_out;
1877
1878 return nbytes;
1879error_out:
1880 memset(&idiag, 0, sizeof(idiag));
1881 return -EINVAL;
1882}
1883
1884/**
1573 * lpfc_idiag_queinfo_read - idiag debugfs read queue information 1885 * lpfc_idiag_queinfo_read - idiag debugfs read queue information
1574 * @file: The file pointer to read from. 1886 * @file: The file pointer to read from.
1575 * @buf: The buffer to copy the data to. 1887 * @buf: The buffer to copy the data to.
@@ -1871,8 +2183,8 @@ lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
1871 return 0; 2183 return 0;
1872 2184
1873 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { 2185 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
1874 index = idiag.cmd.data[2]; 2186 index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
1875 count = idiag.cmd.data[3]; 2187 count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
1876 pque = (struct lpfc_queue *)idiag.ptr_private; 2188 pque = (struct lpfc_queue *)idiag.ptr_private;
1877 } else 2189 } else
1878 return 0; 2190 return 0;
@@ -1944,12 +2256,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
1944 return rc; 2256 return rc;
1945 2257
1946 /* Get and sanity check on command feilds */ 2258 /* Get and sanity check on command feilds */
1947 quetp = idiag.cmd.data[0]; 2259 quetp = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX];
1948 queid = idiag.cmd.data[1]; 2260 queid = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX];
1949 index = idiag.cmd.data[2]; 2261 index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
1950 count = idiag.cmd.data[3]; 2262 count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
1951 offset = idiag.cmd.data[4]; 2263 offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX];
1952 value = idiag.cmd.data[5]; 2264 value = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX];
1953 2265
1954 /* Sanity check on command line arguments */ 2266 /* Sanity check on command line arguments */
1955 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || 2267 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
@@ -2218,7 +2530,7 @@ lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
2218 return 0; 2530 return 0;
2219 2531
2220 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) 2532 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
2221 drb_reg_id = idiag.cmd.data[0]; 2533 drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
2222 else 2534 else
2223 return 0; 2535 return 0;
2224 2536
@@ -2257,7 +2569,7 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
2257{ 2569{
2258 struct lpfc_debug *debug = file->private_data; 2570 struct lpfc_debug *debug = file->private_data;
2259 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; 2571 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2260 uint32_t drb_reg_id, value, reg_val; 2572 uint32_t drb_reg_id, value, reg_val = 0;
2261 void __iomem *drb_reg; 2573 void __iomem *drb_reg;
2262 int rc; 2574 int rc;
2263 2575
@@ -2269,8 +2581,8 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
2269 return rc; 2581 return rc;
2270 2582
2271 /* Sanity check on command line arguments */ 2583 /* Sanity check on command line arguments */
2272 drb_reg_id = idiag.cmd.data[0]; 2584 drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
2273 value = idiag.cmd.data[1]; 2585 value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX];
2274 2586
2275 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || 2587 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
2276 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || 2588 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
@@ -2330,6 +2642,679 @@ error_out:
2330 return -EINVAL; 2642 return -EINVAL;
2331} 2643}
2332 2644
/**
 * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control register
 * @phba: The pointer to hba structure.
 * @pbuffer: The pointer to the buffer to copy the data to.
 * @len: The number of bytes already in @pbuffer; new text is appended here.
 * @ctlregid: The id of the control register to read.
 *
 * Description:
 * This routine reads the control register selected by @ctlregid from the
 * SLI4 config-space register mapping and appends a one-line textual dump
 * of its value to @pbuffer.
 *
 * Returns:
 * This function returns the total amount of data in @pbuffer (the incoming
 * @len plus what was appended). An unrecognized @ctlregid appends nothing,
 * and a NULL @pbuffer returns 0.
 **/
static int
lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
			   int len, uint32_t ctlregid)
{

	if (!pbuffer)
		return 0;

	switch (ctlregid) {
	case LPFC_CTL_PORT_SEM:
		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
				"Port SemReg: 0x%08x\n",
				readl(phba->sli4_hba.conf_regs_memmap_p +
				      LPFC_CTL_PORT_SEM_OFFSET));
		break;
	case LPFC_CTL_PORT_STA:
		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
				"Port StaReg: 0x%08x\n",
				readl(phba->sli4_hba.conf_regs_memmap_p +
				      LPFC_CTL_PORT_STA_OFFSET));
		break;
	case LPFC_CTL_PORT_CTL:
		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
				"Port CtlReg: 0x%08x\n",
				readl(phba->sli4_hba.conf_regs_memmap_p +
				      LPFC_CTL_PORT_CTL_OFFSET));
		break;
	case LPFC_CTL_PORT_ER1:
		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
				"Port Er1Reg: 0x%08x\n",
				readl(phba->sli4_hba.conf_regs_memmap_p +
				      LPFC_CTL_PORT_ER1_OFFSET));
		break;
	case LPFC_CTL_PORT_ER2:
		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
				"Port Er2Reg: 0x%08x\n",
				readl(phba->sli4_hba.conf_regs_memmap_p +
				      LPFC_CTL_PORT_ER2_OFFSET));
		break;
	case LPFC_CTL_PDEV_CTL:
		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
				"PDev CtlReg: 0x%08x\n",
				readl(phba->sli4_hba.conf_regs_memmap_p +
				      LPFC_CTL_PDEV_CTL_OFFSET));
		break;
	default:
		/* unknown register id: nothing appended */
		break;
	}
	return len;
}
2709
/**
 * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the data to.
 * @nbytes: The number of bytes to read.
 * @ppos: The position in the file to start reading from.
 *
 * Description:
 * This routine reads data from the @phba port and device registers according
 * to the idiag command, and copies to user @buf. The special register id
 * LPFC_CTL_ACC_ALL dumps every control register (ids 1..LPFC_CTL_MAX).
 *
 * Returns:
 * This function returns the amount of data that was read (this could be less
 * than @nbytes if the end of the file was reached) or a negative error value.
 **/
static ssize_t
lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes,
		       loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
	uint32_t ctl_reg_id, i;
	char *pbuffer;
	int len = 0;

	/* This is a user read operation */
	debug->op = LPFC_IDIAG_OP_RD;

	/* Lazily allocate the dump buffer; it stays attached to debug->buffer
	 * (presumably released by the file release handler -- confirm).
	 */
	if (!debug->buffer)
		debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL);
	if (!debug->buffer)
		return 0;
	pbuffer = debug->buffer;

	/* Only the first read produces data; later reads see EOF */
	if (*ppos)
		return 0;

	/* Only valid after a ctlacc read command has been set up */
	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD)
		ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
	else
		return 0;

	if (ctl_reg_id == LPFC_CTL_ACC_ALL)
		for (i = 1; i <= LPFC_CTL_MAX; i++)
			len = lpfc_idiag_ctlacc_read_reg(phba,
							 pbuffer, len, i);
	else
		len = lpfc_idiag_ctlacc_read_reg(phba,
						 pbuffer, len, ctl_reg_id);

	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
2762
/**
 * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the user data from.
 * @nbytes: The number of bytes to get.
 * @ppos: The position in the file to start reading from.
 *
 * This routine gets the debugfs idiag command struct from user space and then
 * performs the syntax check for port and device control register read (dump)
 * or write (set) command accordingly. The write opcodes are: WR writes
 * @value verbatim, ST sets (ORs in) the bits of @value, and CL clears
 * (ANDs out) the bits of @value.
 *
 * It returns the @nbytes passed in from debugfs user space when successful.
 * In case of error conditions, it returns proper error code back to the user
 * space.
 **/
static ssize_t
lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf,
			size_t nbytes, loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
	uint32_t ctl_reg_id, value, reg_val = 0;
	void __iomem *ctl_reg;
	int rc;

	/* This is a user write operation */
	debug->op = LPFC_IDIAG_OP_WR;

	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
	if (rc < 0)
		return rc;

	/* Sanity check on command line arguments */
	ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
	value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX];

	/* rc holds the argument count parsed by lpfc_idiag_cmd_get; it must
	 * match the expected count for the given opcode exactly.
	 */
	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
		if (rc != LPFC_CTL_ACC_WR_CMD_ARG)
			goto error_out;
		if (ctl_reg_id > LPFC_CTL_MAX)
			goto error_out;
	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) {
		if (rc != LPFC_CTL_ACC_RD_CMD_ARG)
			goto error_out;
		if ((ctl_reg_id > LPFC_CTL_MAX) &&
		    (ctl_reg_id != LPFC_CTL_ACC_ALL))
			goto error_out;
	} else
		goto error_out;

	/* Perform the write access operation */
	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
		/* Map the register id to its config-space MMIO address */
		switch (ctl_reg_id) {
		case LPFC_CTL_PORT_SEM:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_SEM_OFFSET;
			break;
		case LPFC_CTL_PORT_STA:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_STA_OFFSET;
			break;
		case LPFC_CTL_PORT_CTL:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_CTL_OFFSET;
			break;
		case LPFC_CTL_PORT_ER1:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_ER1_OFFSET;
			break;
		case LPFC_CTL_PORT_ER2:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PORT_ER2_OFFSET;
			break;
		case LPFC_CTL_PDEV_CTL:
			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
				LPFC_CTL_PDEV_CTL_OFFSET;
			break;
		default:
			goto error_out;
		}

		/* WR: overwrite; ST: read-modify-write set; CL: clear bits */
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR)
			reg_val = value;
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) {
			reg_val = readl(ctl_reg);
			reg_val |= value;
		}
		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
			reg_val = readl(ctl_reg);
			reg_val &= ~value;
		}
		writel(reg_val, ctl_reg);
		readl(ctl_reg); /* flush */
	}
	return nbytes;

error_out:
	/* Clean out command structure on command error out */
	memset(&idiag, 0, sizeof(idiag));
	return -EINVAL;
}
2868
2869/**
2870 * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup
2871 * @phba: Pointer to HBA context object.
2872 * @pbuffer: Pointer to data buffer.
2873 *
2874 * Description:
2875 * This routine gets the driver mailbox access debugfs setup information.
2876 *
2877 * Returns:
2878 * This function returns the amount of data that was read (this could be less
2879 * than @nbytes if the end of the file was reached) or a negative error value.
2880 **/
2881static int
2882lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
2883{
2884 uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
2885 int len = 0;
2886
2887 mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
2888 mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
2889 mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
2890 mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
2891
2892 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2893 "mbx_dump_map: 0x%08x\n", mbx_dump_map);
2894 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2895 "mbx_dump_cnt: %04d\n", mbx_dump_cnt);
2896 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2897 "mbx_word_cnt: %04d\n", mbx_word_cnt);
2898 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2899 "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
2900
2901 return len;
2902}
2903
/**
 * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the data to.
 * @nbytes: The number of bytes to read.
 * @ppos: The position in the file to start reading from.
 *
 * Description:
 * This routine reads data from the @phba driver mailbox access debugfs setup
 * information. It only produces output when a mailbox dump command (driver
 * or BSG pass-through) has previously been armed via the write handler.
 *
 * Returns:
 * This function returns the amount of data that was read (this could be less
 * than @nbytes if the end of the file was reached) or a negative error value.
 **/
static ssize_t
lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes,
	loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
	char *pbuffer;
	int len = 0;

	/* This is a user read operation */
	debug->op = LPFC_IDIAG_OP_RD;

	/* Lazily allocate the dump buffer on first read */
	if (!debug->buffer)
		debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL);
	if (!debug->buffer)
		return 0;
	pbuffer = debug->buffer;

	/* Only the first read produces data; later reads see EOF */
	if (*ppos)
		return 0;

	if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) &&
	    (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP))
		return 0;

	len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer);

	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
2948
/**
 * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the user data from.
 * @nbytes: The number of bytes to get.
 * @ppos: The position in the file to start reading from.
 *
 * This routine gets the debugfs idiag command struct from user space and then
 * performs the syntax check for driver mailbox command (dump) and sets up the
 * necessary states in the idiag command struct accordingly.
 *
 * It returns the @nbytes passed in from debugfs user space when successful.
 * In case of error conditions, it returns proper error code back to the user
 * space.
 **/
static ssize_t
lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf,
	size_t nbytes, loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
	int rc;

	/* This is a user write operation */
	debug->op = LPFC_IDIAG_OP_WR;

	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
	if (rc < 0)
		return rc;

	/* Sanity check on command line arguments */
	mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
	mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
	mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
	mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];

	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) {
		/* Driver mailbox dump: map must select only mailbox bits */
		if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL))
			goto error_out;
		if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) &&
		    (mbx_dump_map != LPFC_MBX_DMP_ALL))
			goto error_out;
		/* NOTE(review): bounds a word count against sizeof(MAILBOX_t)
		 * in bytes -- confirm the intended unit.
		 */
		if (mbx_word_cnt > sizeof(MAILBOX_t))
			goto error_out;
	} else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) {
		/* BSG pass-through dump: map must select only BSG bits */
		if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL))
			goto error_out;
		if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) &&
		    (mbx_dump_map != LPFC_MBX_DMP_ALL))
			goto error_out;
		if (mbx_word_cnt > (BSG_MBOX_SIZE)/4)
			goto error_out;
		/* only the 0x9b pass-through mailbox command is supported */
		if (mbx_mbox_cmd != 0x9b)
			goto error_out;
	} else
		goto error_out;

	if (mbx_word_cnt == 0)
		goto error_out;
	if (rc != LPFC_MBX_DMP_ARG)
		goto error_out;
	/* mailbox command opcodes are a single byte */
	if (mbx_mbox_cmd & ~0xff)
		goto error_out;

	/* condition for stop mailbox dump */
	if (mbx_dump_cnt == 0)
		goto reset_out;

	return nbytes;

reset_out:
	/* Clean out command structure on command error out */
	memset(&idiag, 0, sizeof(idiag));
	return nbytes;

error_out:
	/* Clean out command structure on command error out */
	memset(&idiag, 0, sizeof(idiag));
	return -EINVAL;
}
3029
3030/**
3031 * lpfc_idiag_extacc_avail_get - get the available extents information
3032 * @phba: pointer to lpfc hba data structure.
3033 * @pbuffer: pointer to internal buffer.
3034 * @len: length into the internal buffer data has been copied.
3035 *
3036 * Description:
3037 * This routine is to get the available extent information.
3038 *
3039 * Returns:
3040 * overall lenth of the data read into the internal buffer.
3041 **/
3042static int
3043lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
3044{
3045 uint16_t ext_cnt, ext_size;
3046
3047 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3048 "\nAvailable Extents Information:\n");
3049
3050 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3051 "\tPort Available VPI extents: ");
3052 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
3053 &ext_cnt, &ext_size);
3054 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3055 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3056
3057 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3058 "\tPort Available VFI extents: ");
3059 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
3060 &ext_cnt, &ext_size);
3061 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3062 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3063
3064 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3065 "\tPort Available RPI extents: ");
3066 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
3067 &ext_cnt, &ext_size);
3068 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3069 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3070
3071 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3072 "\tPort Available XRI extents: ");
3073 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
3074 &ext_cnt, &ext_size);
3075 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3076 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3077
3078 return len;
3079}
3080
3081/**
3082 * lpfc_idiag_extacc_alloc_get - get the allocated extents information
3083 * @phba: pointer to lpfc hba data structure.
3084 * @pbuffer: pointer to internal buffer.
3085 * @len: length into the internal buffer data has been copied.
3086 *
3087 * Description:
3088 * This routine is to get the allocated extent information.
3089 *
3090 * Returns:
3091 * overall lenth of the data read into the internal buffer.
3092 **/
3093static int
3094lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
3095{
3096 uint16_t ext_cnt, ext_size;
3097 int rc;
3098
3099 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3100 "\nAllocated Extents Information:\n");
3101
3102 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3103 "\tHost Allocated VPI extents: ");
3104 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
3105 &ext_cnt, &ext_size);
3106 if (!rc)
3107 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3108 "Port %d Extent %3d, Size %3d\n",
3109 phba->brd_no, ext_cnt, ext_size);
3110 else
3111 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3112 "N/A\n");
3113
3114 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3115 "\tHost Allocated VFI extents: ");
3116 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
3117 &ext_cnt, &ext_size);
3118 if (!rc)
3119 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3120 "Port %d Extent %3d, Size %3d\n",
3121 phba->brd_no, ext_cnt, ext_size);
3122 else
3123 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3124 "N/A\n");
3125
3126 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3127 "\tHost Allocated RPI extents: ");
3128 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
3129 &ext_cnt, &ext_size);
3130 if (!rc)
3131 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3132 "Port %d Extent %3d, Size %3d\n",
3133 phba->brd_no, ext_cnt, ext_size);
3134 else
3135 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3136 "N/A\n");
3137
3138 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3139 "\tHost Allocated XRI extents: ");
3140 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
3141 &ext_cnt, &ext_size);
3142 if (!rc)
3143 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3144 "Port %d Extent %3d, Size %3d\n",
3145 phba->brd_no, ext_cnt, ext_size);
3146 else
3147 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3148 "N/A\n");
3149
3150 return len;
3151}
3152
/**
 * lpfc_idiag_extacc_drivr_get - get driver extent information
 * @phba: pointer to lpfc hba data structure.
 * @pbuffer: pointer to internal buffer.
 * @len: length into the internal buffer data has been copied.
 *
 * Description:
 * This routine walks the driver's per-type resource block lists (VPI, VFI,
 * RPI, XRI) and appends one line per block (start and count) to @pbuffer.
 *
 * Returns:
 * overall length of the data read into the internal buffer.
 **/
static int
lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
{
	struct lpfc_rsrc_blks *rsrc_blks;
	int index;

	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
			"\nDriver Extents Information:\n");

	/* VPI blocks hang off the hba itself; the rest are under sli4_hba */
	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
			"\tVPI extents:\n");
	index = 0;
	list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
				"\t\tBlock %3d: Start %4d, Count %4d\n",
				index, rsrc_blks->rsrc_start,
				rsrc_blks->rsrc_size);
		index++;
	}
	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
			"\tVFI extents:\n");
	index = 0;
	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
			    list) {
		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
				"\t\tBlock %3d: Start %4d, Count %4d\n",
				index, rsrc_blks->rsrc_start,
				rsrc_blks->rsrc_size);
		index++;
	}

	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
			"\tRPI extents:\n");
	index = 0;
	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
			    list) {
		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
				"\t\tBlock %3d: Start %4d, Count %4d\n",
				index, rsrc_blks->rsrc_start,
				rsrc_blks->rsrc_size);
		index++;
	}

	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
			"\tXRI extents:\n");
	index = 0;
	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
			    list) {
		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
				"\t\tBlock %3d: Start %4d, Count %4d\n",
				index, rsrc_blks->rsrc_start,
				rsrc_blks->rsrc_size);
		index++;
	}

	return len;
}
3222
3223/**
3224 * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands
3225 * @file: The file pointer to read from.
3226 * @buf: The buffer to copy the user data from.
3227 * @nbytes: The number of bytes to get.
3228 * @ppos: The position in the file to start reading from.
3229 *
3230 * This routine get the debugfs idiag command struct from user space and then
3231 * perform the syntax check for extent information access commands and sets
3232 * up the necessary states in the idiag command struct accordingly.
3233 *
3234 * It returns the @nbytges passing in from debugfs user space when successful.
3235 * In case of error conditions, it returns proper error code back to the user
3236 * space.
3237 **/
3238static ssize_t
3239lpfc_idiag_extacc_write(struct file *file, const char __user *buf,
3240 size_t nbytes, loff_t *ppos)
3241{
3242 struct lpfc_debug *debug = file->private_data;
3243 uint32_t ext_map;
3244 int rc;
3245
3246 /* This is a user write operation */
3247 debug->op = LPFC_IDIAG_OP_WR;
3248
3249 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
3250 if (rc < 0)
3251 return rc;
3252
3253 ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
3254
3255 if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
3256 goto error_out;
3257 if (rc != LPFC_EXT_ACC_CMD_ARG)
3258 goto error_out;
3259 if (!(ext_map & LPFC_EXT_ACC_ALL))
3260 goto error_out;
3261
3262 return nbytes;
3263error_out:
3264 /* Clean out command structure on command error out */
3265 memset(&idiag, 0, sizeof(idiag));
3266 return -EINVAL;
3267}
3268
/**
 * lpfc_idiag_extacc_read - idiag debugfs read access to extent information
 * @file: The file pointer to read from.
 * @buf: The buffer to copy the data to.
 * @nbytes: The number of bytes to read.
 * @ppos: The position in the file to start reading from.
 *
 * Description:
 * This routine reads data from the proper extent information according to
 * the idiag command, and copies to user @buf. The extent map bits select
 * which sections (available, allocated, driver) are included.
 *
 * Returns:
 * This function returns the amount of data that was read (this could be less
 * than @nbytes if the end of the file was reached) or a negative error value.
 **/
static ssize_t
lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
	loff_t *ppos)
{
	struct lpfc_debug *debug = file->private_data;
	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
	char *pbuffer;
	uint32_t ext_map;
	int len = 0;

	/* This is a user read operation */
	debug->op = LPFC_IDIAG_OP_RD;

	/* Lazily allocate the dump buffer on first read */
	if (!debug->buffer)
		debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL);
	if (!debug->buffer)
		return 0;
	pbuffer = debug->buffer;
	/* Only the first read produces data; later reads see EOF */
	if (*ppos)
		return 0;
	if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
		return 0;

	/* each helper returns the running total, so len accumulates */
	ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
	if (ext_map & LPFC_EXT_ACC_AVAIL)
		len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len);
	if (ext_map & LPFC_EXT_ACC_ALLOC)
		len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len);
	if (ext_map & LPFC_EXT_ACC_DRIVR)
		len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len);

	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
3317
2333#undef lpfc_debugfs_op_disc_trc 3318#undef lpfc_debugfs_op_disc_trc
2334static const struct file_operations lpfc_debugfs_op_disc_trc = { 3319static const struct file_operations lpfc_debugfs_op_disc_trc = {
2335 .owner = THIS_MODULE, 3320 .owner = THIS_MODULE,
@@ -2420,6 +3405,16 @@ static const struct file_operations lpfc_idiag_op_pciCfg = {
2420 .release = lpfc_idiag_cmd_release, 3405 .release = lpfc_idiag_cmd_release,
2421}; 3406};
2422 3407
/* debugfs file operations for the iDiag PCI BAR access node ("barAcc") */
#undef lpfc_idiag_op_barAcc
static const struct file_operations lpfc_idiag_op_barAcc = {
	.owner = THIS_MODULE,
	.open = lpfc_idiag_open,
	.llseek = lpfc_debugfs_lseek,
	.read = lpfc_idiag_baracc_read,
	.write = lpfc_idiag_baracc_write,
	.release = lpfc_idiag_cmd_release,
};
3417
2423#undef lpfc_idiag_op_queInfo 3418#undef lpfc_idiag_op_queInfo
2424static const struct file_operations lpfc_idiag_op_queInfo = { 3419static const struct file_operations lpfc_idiag_op_queInfo = {
2425 .owner = THIS_MODULE, 3420 .owner = THIS_MODULE,
@@ -2428,7 +3423,7 @@ static const struct file_operations lpfc_idiag_op_queInfo = {
2428 .release = lpfc_idiag_release, 3423 .release = lpfc_idiag_release,
2429}; 3424};
2430 3425
2431#undef lpfc_idiag_op_queacc 3426#undef lpfc_idiag_op_queAcc
2432static const struct file_operations lpfc_idiag_op_queAcc = { 3427static const struct file_operations lpfc_idiag_op_queAcc = {
2433 .owner = THIS_MODULE, 3428 .owner = THIS_MODULE,
2434 .open = lpfc_idiag_open, 3429 .open = lpfc_idiag_open,
@@ -2438,7 +3433,7 @@ static const struct file_operations lpfc_idiag_op_queAcc = {
2438 .release = lpfc_idiag_cmd_release, 3433 .release = lpfc_idiag_cmd_release,
2439}; 3434};
2440 3435
2441#undef lpfc_idiag_op_drbacc 3436#undef lpfc_idiag_op_drbAcc
2442static const struct file_operations lpfc_idiag_op_drbAcc = { 3437static const struct file_operations lpfc_idiag_op_drbAcc = {
2443 .owner = THIS_MODULE, 3438 .owner = THIS_MODULE,
2444 .open = lpfc_idiag_open, 3439 .open = lpfc_idiag_open,
@@ -2448,8 +3443,234 @@ static const struct file_operations lpfc_idiag_op_drbAcc = {
2448 .release = lpfc_idiag_cmd_release, 3443 .release = lpfc_idiag_cmd_release,
2449}; 3444};
2450 3445
/* debugfs file operations for the iDiag control register node ("ctlAcc") */
#undef lpfc_idiag_op_ctlAcc
static const struct file_operations lpfc_idiag_op_ctlAcc = {
	.owner = THIS_MODULE,
	.open = lpfc_idiag_open,
	.llseek = lpfc_debugfs_lseek,
	.read = lpfc_idiag_ctlacc_read,
	.write = lpfc_idiag_ctlacc_write,
	.release = lpfc_idiag_cmd_release,
};
3455
/* debugfs file operations for the iDiag mailbox access node ("mbxAcc") */
#undef lpfc_idiag_op_mbxAcc
static const struct file_operations lpfc_idiag_op_mbxAcc = {
	.owner = THIS_MODULE,
	.open = lpfc_idiag_open,
	.llseek = lpfc_debugfs_lseek,
	.read = lpfc_idiag_mbxacc_read,
	.write = lpfc_idiag_mbxacc_write,
	.release = lpfc_idiag_cmd_release,
};
3465
/* debugfs file operations for the iDiag extent information node ("extAcc") */
#undef lpfc_idiag_op_extAcc
static const struct file_operations lpfc_idiag_op_extAcc = {
	.owner = THIS_MODULE,
	.open = lpfc_idiag_open,
	.llseek = lpfc_debugfs_lseek,
	.read = lpfc_idiag_extacc_read,
	.write = lpfc_idiag_extacc_write,
	.release = lpfc_idiag_cmd_release,
};
3475
2451#endif 3476#endif
2452 3477
/**
 * lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
 * @phba: Pointer to HBA context object.
 * @nemb_tp: Non-embedded mailbox buffer type.
 * @mbox_tp: Mailbox direction (read or write).
 * @dma_tp: DMA buffer type (mailbox or external buffer).
 * @sta_tp: Mailbox state/stage type, included in the dump banner.
 * @dmabuf: Pointer to a DMA buffer descriptor whose contents are dumped.
 * @ext_buf: External buffer count or sequence number for the banner.
 *
 * Description:
 * This routine dumps a bsg pass-through non-embedded mailbox command with
 * external buffer to the kernel log, when the currently armed idiag dump
 * command's map bits match the (@mbox_tp, @dma_tp) combination. Only the
 * 0x9B pass-through command is dumped.
 **/
void
lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
	enum mbox_type mbox_tp, enum dma_type dma_tp,
	enum sta_type sta_tp,
	struct lpfc_dmabuf *dmabuf, uint32_t ext_buf)
{
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt;
	char line_buf[LPFC_MBX_ACC_LBUF_SZ];
	int len = 0;
	uint32_t do_dump = 0;
	uint32_t *pword;
	uint32_t i;

	if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)
		return;

	/* Pointers into the armed idiag command so the dump count can be
	 * decremented in place.
	 */
	mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
	mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
	mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
	mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];

	if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) ||
	    (*mbx_dump_cnt == 0) ||
	    (*mbx_word_cnt == 0))
		return;

	if (*mbx_mbox_cmd != 0x9B)
		return;

	/* Select which banner to print from the direction/buffer combination;
	 * do_dump stays 0 when the map does not request this combination.
	 */
	if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) {
		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) {
			do_dump |= LPFC_BSG_DMP_MBX_RD_MBX;
			printk(KERN_ERR "\nRead mbox command (x%x), "
			       "nemb:0x%x, extbuf_cnt:%d:\n",
			       sta_tp, nemb_tp, ext_buf);
		}
	}
	if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) {
		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) {
			do_dump |= LPFC_BSG_DMP_MBX_RD_BUF;
			printk(KERN_ERR "\nRead mbox buffer (x%x), "
			       "nemb:0x%x, extbuf_seq:%d:\n",
			       sta_tp, nemb_tp, ext_buf);
		}
	}
	if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) {
		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) {
			do_dump |= LPFC_BSG_DMP_MBX_WR_MBX;
			printk(KERN_ERR "\nWrite mbox command (x%x), "
			       "nemb:0x%x, extbuf_cnt:%d:\n",
			       sta_tp, nemb_tp, ext_buf);
		}
	}
	if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) {
		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) {
			do_dump |= LPFC_BSG_DMP_MBX_WR_BUF;
			printk(KERN_ERR "\nWrite mbox buffer (x%x), "
			       "nemb:0x%x, extbuf_seq:%d:\n",
			       sta_tp, nemb_tp, ext_buf);
		}
	}

	/* dump buffer content, eight 32-bit words per output line */
	if (do_dump) {
		pword = (uint32_t *)dmabuf->virt;
		for (i = 0; i < *mbx_word_cnt; i++) {
			if (!(i % 8)) {
				if (i != 0)
					printk(KERN_ERR "%s\n", line_buf);
				len = 0;
				len += snprintf(line_buf+len,
						LPFC_MBX_ACC_LBUF_SZ-len,
						"%03d: ", i);
			}
			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
					"%08x ", (uint32_t)*pword);
			pword++;
		}
		/* flush a partial final line */
		if ((i - 1) % 8)
			printk(KERN_ERR "%s\n", line_buf);
		(*mbx_dump_cnt)--;
	}

	/* Clean out command structure on reaching dump count */
	if (*mbx_dump_cnt == 0)
		memset(&idiag, 0, sizeof(idiag));
	return;
#endif
}
3576
/**
 * lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to the mailbox command being issued.
 *
 * Description:
 * This routine dumps a pass-through non-embedded mailbox command from the
 * issue-mailbox path to the kernel log when an idiag driver-mailbox dump
 * command is armed and its command filter matches @pmbox->mbxCommand.
 * Depending on the dump map it renders the mailbox by 32-bit words and/or
 * by bytes.
 **/
void
lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
{
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd;
	char line_buf[LPFC_MBX_ACC_LBUF_SZ];
	int len = 0;
	uint32_t *pword;
	uint8_t *pbyte;
	uint32_t i, j;

	if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP)
		return;

	/* Pointers into the armed idiag command so the dump count can be
	 * decremented in place.
	 */
	mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
	mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
	mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
	mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];

	if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) ||
	    (*mbx_dump_cnt == 0) ||
	    (*mbx_word_cnt == 0))
		return;

	/* Dump either all commands or only the one being filtered for */
	if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) &&
	    (*mbx_mbox_cmd != pmbox->mbxCommand))
		return;

	/* dump buffer content, eight 32-bit words per output line */
	if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) {
		printk(KERN_ERR "Mailbox command:0x%x dump by word:\n",
		       pmbox->mbxCommand);
		pword = (uint32_t *)pmbox;
		for (i = 0; i < *mbx_word_cnt; i++) {
			if (!(i % 8)) {
				if (i != 0)
					printk(KERN_ERR "%s\n", line_buf);
				len = 0;
				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
				len += snprintf(line_buf+len,
						LPFC_MBX_ACC_LBUF_SZ-len,
						"%03d: ", i);
			}
			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
					"%08x ",
					((uint32_t)*pword) & 0xffffffff);
			pword++;
		}
		/* flush a partial final line */
		if ((i - 1) % 8)
			printk(KERN_ERR "%s\n", line_buf);
		printk(KERN_ERR "\n");
	}
	/* byte view: four bytes per group, eight groups per line */
	if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) {
		printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n",
		       pmbox->mbxCommand);
		pbyte = (uint8_t *)pmbox;
		for (i = 0; i < *mbx_word_cnt; i++) {
			if (!(i % 8)) {
				if (i != 0)
					printk(KERN_ERR "%s\n", line_buf);
				len = 0;
				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
				len += snprintf(line_buf+len,
						LPFC_MBX_ACC_LBUF_SZ-len,
						"%03d: ", i);
			}
			for (j = 0; j < 4; j++) {
				len += snprintf(line_buf+len,
						LPFC_MBX_ACC_LBUF_SZ-len,
						"%02x",
						((uint8_t)*pbyte) & 0xff);
				pbyte++;
			}
			len += snprintf(line_buf+len,
					LPFC_MBX_ACC_LBUF_SZ-len, " ");
		}
		/* flush a partial final line */
		if ((i - 1) % 8)
			printk(KERN_ERR "%s\n", line_buf);
		printk(KERN_ERR "\n");
	}
	(*mbx_dump_cnt)--;

	/* Clean out command structure on reaching dump count */
	if (*mbx_dump_cnt == 0)
		memset(&idiag, 0, sizeof(idiag));
	return;
#endif
}
3673
2453/** 3674/**
2454 * lpfc_debugfs_initialize - Initialize debugfs for a vport 3675 * lpfc_debugfs_initialize - Initialize debugfs for a vport
2455 * @vport: The vport pointer to initialize. 3676 * @vport: The vport pointer to initialize.
@@ -2673,7 +3894,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2673 vport, &lpfc_debugfs_op_nodelist); 3894 vport, &lpfc_debugfs_op_nodelist);
2674 if (!vport->debug_nodelist) { 3895 if (!vport->debug_nodelist) {
2675 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3896 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2676 "0409 Can't create debugfs nodelist\n"); 3897 "2985 Can't create debugfs nodelist\n");
2677 goto debug_failed; 3898 goto debug_failed;
2678 } 3899 }
2679 3900
@@ -2710,6 +3931,20 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2710 idiag.offset.last_rd = 0; 3931 idiag.offset.last_rd = 0;
2711 } 3932 }
2712 3933
3934 /* iDiag PCI BAR access */
3935 snprintf(name, sizeof(name), "barAcc");
3936 if (!phba->idiag_bar_acc) {
3937 phba->idiag_bar_acc =
3938 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3939 phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
3940 if (!phba->idiag_bar_acc) {
3941 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3942 "3056 Can't create idiag debugfs\n");
3943 goto debug_failed;
3944 }
3945 idiag.offset.last_rd = 0;
3946 }
3947
2713 /* iDiag get PCI function queue information */ 3948 /* iDiag get PCI function queue information */
2714 snprintf(name, sizeof(name), "queInfo"); 3949 snprintf(name, sizeof(name), "queInfo");
2715 if (!phba->idiag_que_info) { 3950 if (!phba->idiag_que_info) {
@@ -2749,6 +3984,50 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2749 } 3984 }
2750 } 3985 }
2751 3986
3987 /* iDiag access PCI function control registers */
3988 snprintf(name, sizeof(name), "ctlAcc");
3989 if (!phba->idiag_ctl_acc) {
3990 phba->idiag_ctl_acc =
3991 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3992 phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
3993 if (!phba->idiag_ctl_acc) {
3994 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3995 "2981 Can't create idiag debugfs\n");
3996 goto debug_failed;
3997 }
3998 }
3999
4000 /* iDiag access mbox commands */
4001 snprintf(name, sizeof(name), "mbxAcc");
4002 if (!phba->idiag_mbx_acc) {
4003 phba->idiag_mbx_acc =
4004 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
4005 phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
4006 if (!phba->idiag_mbx_acc) {
4007 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4008 "2980 Can't create idiag debugfs\n");
4009 goto debug_failed;
4010 }
4011 }
4012
4013 /* iDiag extents access commands */
4014 if (phba->sli4_hba.extents_in_use) {
4015 snprintf(name, sizeof(name), "extAcc");
4016 if (!phba->idiag_ext_acc) {
4017 phba->idiag_ext_acc =
4018 debugfs_create_file(name,
4019 S_IFREG|S_IRUGO|S_IWUSR,
4020 phba->idiag_root, phba,
4021 &lpfc_idiag_op_extAcc);
4022 if (!phba->idiag_ext_acc) {
4023 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4024 "2986 Cant create "
4025 "idiag debugfs\n");
4026 goto debug_failed;
4027 }
4028 }
4029 }
4030
2752debug_failed: 4031debug_failed:
2753 return; 4032 return;
2754#endif 4033#endif
@@ -2783,7 +4062,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2783 debugfs_remove(vport->debug_nodelist); /* nodelist */ 4062 debugfs_remove(vport->debug_nodelist); /* nodelist */
2784 vport->debug_nodelist = NULL; 4063 vport->debug_nodelist = NULL;
2785 } 4064 }
2786
2787 if (vport->vport_debugfs_root) { 4065 if (vport->vport_debugfs_root) {
2788 debugfs_remove(vport->vport_debugfs_root); /* vportX */ 4066 debugfs_remove(vport->vport_debugfs_root); /* vportX */
2789 vport->vport_debugfs_root = NULL; 4067 vport->vport_debugfs_root = NULL;
@@ -2827,6 +4105,21 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2827 * iDiag release 4105 * iDiag release
2828 */ 4106 */
2829 if (phba->sli_rev == LPFC_SLI_REV4) { 4107 if (phba->sli_rev == LPFC_SLI_REV4) {
4108 if (phba->idiag_ext_acc) {
4109 /* iDiag extAcc */
4110 debugfs_remove(phba->idiag_ext_acc);
4111 phba->idiag_ext_acc = NULL;
4112 }
4113 if (phba->idiag_mbx_acc) {
4114 /* iDiag mbxAcc */
4115 debugfs_remove(phba->idiag_mbx_acc);
4116 phba->idiag_mbx_acc = NULL;
4117 }
4118 if (phba->idiag_ctl_acc) {
4119 /* iDiag ctlAcc */
4120 debugfs_remove(phba->idiag_ctl_acc);
4121 phba->idiag_ctl_acc = NULL;
4122 }
2830 if (phba->idiag_drb_acc) { 4123 if (phba->idiag_drb_acc) {
2831 /* iDiag drbAcc */ 4124 /* iDiag drbAcc */
2832 debugfs_remove(phba->idiag_drb_acc); 4125 debugfs_remove(phba->idiag_drb_acc);
@@ -2842,6 +4135,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2842 debugfs_remove(phba->idiag_que_info); 4135 debugfs_remove(phba->idiag_que_info);
2843 phba->idiag_que_info = NULL; 4136 phba->idiag_que_info = NULL;
2844 } 4137 }
4138 if (phba->idiag_bar_acc) {
4139 /* iDiag barAcc */
4140 debugfs_remove(phba->idiag_bar_acc);
4141 phba->idiag_bar_acc = NULL;
4142 }
2845 if (phba->idiag_pci_cfg) { 4143 if (phba->idiag_pci_cfg) {
2846 /* iDiag pciCfg */ 4144 /* iDiag pciCfg */
2847 debugfs_remove(phba->idiag_pci_cfg); 4145 debugfs_remove(phba->idiag_pci_cfg);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 6525a5e62d27..f83bd944edd8 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -39,14 +39,51 @@
39/* hbqinfo output buffer size */ 39/* hbqinfo output buffer size */
40#define LPFC_HBQINFO_SIZE 8192 40#define LPFC_HBQINFO_SIZE 8192
41 41
42/*
43 * For SLI4 iDiag debugfs diagnostics tool
44 */
45
42/* pciConf */ 46/* pciConf */
43#define LPFC_PCI_CFG_BROWSE 0xffff 47#define LPFC_PCI_CFG_BROWSE 0xffff
44#define LPFC_PCI_CFG_RD_CMD_ARG 2 48#define LPFC_PCI_CFG_RD_CMD_ARG 2
45#define LPFC_PCI_CFG_WR_CMD_ARG 3 49#define LPFC_PCI_CFG_WR_CMD_ARG 3
46#define LPFC_PCI_CFG_SIZE 4096 50#define LPFC_PCI_CFG_SIZE 4096
47#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
48#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) 51#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
49 52
53#define IDIAG_PCICFG_WHERE_INDX 0
54#define IDIAG_PCICFG_COUNT_INDX 1
55#define IDIAG_PCICFG_VALUE_INDX 2
56
57/* barAcc */
58#define LPFC_PCI_BAR_BROWSE 0xffff
59#define LPFC_PCI_BAR_RD_CMD_ARG 3
60#define LPFC_PCI_BAR_WR_CMD_ARG 3
61
62#define LPFC_PCI_IF0_BAR0_SIZE (1024 * 16)
63#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128)
64#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128)
65#define LPFC_PCI_IF2_BAR0_SIZE (1024 * 32)
66
67#define LPFC_PCI_BAR_RD_BUF_SIZE 4096
68#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4)
69
70#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4)
71#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4)
72#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4)
73#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4)
74
75#define IDIAG_BARACC_BAR_NUM_INDX 0
76#define IDIAG_BARACC_OFF_SET_INDX 1
77#define IDIAG_BARACC_ACC_MOD_INDX 2
78#define IDIAG_BARACC_REG_VAL_INDX 2
79#define IDIAG_BARACC_BAR_SZE_INDX 3
80
81#define IDIAG_BARACC_BAR_0 0
82#define IDIAG_BARACC_BAR_1 1
83#define IDIAG_BARACC_BAR_2 2
84
85#define SINGLE_WORD 1
86
50/* queue info */ 87/* queue info */
51#define LPFC_QUE_INFO_GET_BUF_SIZE 4096 88#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
52 89
@@ -63,7 +100,14 @@
63#define LPFC_IDIAG_WQ 4 100#define LPFC_IDIAG_WQ 4
64#define LPFC_IDIAG_RQ 5 101#define LPFC_IDIAG_RQ 5
65 102
66/* doorbell acc */ 103#define IDIAG_QUEACC_QUETP_INDX 0
104#define IDIAG_QUEACC_QUEID_INDX 1
105#define IDIAG_QUEACC_INDEX_INDX 2
106#define IDIAG_QUEACC_COUNT_INDX 3
107#define IDIAG_QUEACC_OFFST_INDX 4
108#define IDIAG_QUEACC_VALUE_INDX 5
109
110/* doorbell register acc */
67#define LPFC_DRB_ACC_ALL 0xffff 111#define LPFC_DRB_ACC_ALL 0xffff
68#define LPFC_DRB_ACC_RD_CMD_ARG 1 112#define LPFC_DRB_ACC_RD_CMD_ARG 1
69#define LPFC_DRB_ACC_WR_CMD_ARG 2 113#define LPFC_DRB_ACC_WR_CMD_ARG 2
@@ -76,6 +120,67 @@
76 120
77#define LPFC_DRB_MAX 4 121#define LPFC_DRB_MAX 4
78 122
123#define IDIAG_DRBACC_REGID_INDX 0
124#define IDIAG_DRBACC_VALUE_INDX 1
125
126/* control register acc */
127#define LPFC_CTL_ACC_ALL 0xffff
128#define LPFC_CTL_ACC_RD_CMD_ARG 1
129#define LPFC_CTL_ACC_WR_CMD_ARG 2
130#define LPFC_CTL_ACC_BUF_SIZE 256
131
132#define LPFC_CTL_PORT_SEM 1
133#define LPFC_CTL_PORT_STA 2
134#define LPFC_CTL_PORT_CTL 3
135#define LPFC_CTL_PORT_ER1 4
136#define LPFC_CTL_PORT_ER2 5
137#define LPFC_CTL_PDEV_CTL 6
138
139#define LPFC_CTL_MAX 6
140
141#define IDIAG_CTLACC_REGID_INDX 0
142#define IDIAG_CTLACC_VALUE_INDX 1
143
144/* mailbox access */
145#define LPFC_MBX_DMP_ARG 4
146
147#define LPFC_MBX_ACC_BUF_SIZE 512
148#define LPFC_MBX_ACC_LBUF_SZ 128
149
150#define LPFC_MBX_DMP_MBX_WORD 0x00000001
151#define LPFC_MBX_DMP_MBX_BYTE 0x00000002
152#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE)
153
154#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001
155#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002
156#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004
157#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008
158#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \
159 LPFC_BSG_DMP_MBX_RD_BUF | \
160 LPFC_BSG_DMP_MBX_WR_MBX | \
161 LPFC_BSG_DMP_MBX_WR_BUF)
162
163#define LPFC_MBX_DMP_ALL 0xffff
164#define LPFC_MBX_ALL_CMD 0xff
165
166#define IDIAG_MBXACC_MBCMD_INDX 0
167#define IDIAG_MBXACC_DPMAP_INDX 1
168#define IDIAG_MBXACC_DPCNT_INDX 2
169#define IDIAG_MBXACC_WDCNT_INDX 3
170
171/* extents access */
172#define LPFC_EXT_ACC_CMD_ARG 1
173#define LPFC_EXT_ACC_BUF_SIZE 4096
174
175#define LPFC_EXT_ACC_AVAIL 0x1
176#define LPFC_EXT_ACC_ALLOC 0x2
177#define LPFC_EXT_ACC_DRIVR 0x4
178#define LPFC_EXT_ACC_ALL (LPFC_EXT_ACC_DRIVR | \
179 LPFC_EXT_ACC_AVAIL | \
180 LPFC_EXT_ACC_ALLOC)
181
182#define IDIAG_EXTACC_EXMAP_INDX 0
183
79#define SIZE_U8 sizeof(uint8_t) 184#define SIZE_U8 sizeof(uint8_t)
80#define SIZE_U16 sizeof(uint16_t) 185#define SIZE_U16 sizeof(uint16_t)
81#define SIZE_U32 sizeof(uint32_t) 186#define SIZE_U32 sizeof(uint32_t)
@@ -110,6 +215,11 @@ struct lpfc_idiag_cmd {
110#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 215#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
111#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 216#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
112 217
218#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008
219#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009
220#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a
221#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b
222
113#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011 223#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
114#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012 224#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
115#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013 225#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
@@ -119,6 +229,17 @@ struct lpfc_idiag_cmd {
119#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022 229#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
120#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023 230#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
121#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024 231#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
232
233#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031
234#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032
235#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033
236#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034
237
238#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041
239#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042
240
241#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051
242
122 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; 243 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
123}; 244};
124 245
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 32a084534f3e..023da0e00d38 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -647,21 +647,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
647 } 647 }
648 lpfc_cleanup_pending_mbox(vport); 648 lpfc_cleanup_pending_mbox(vport);
649 649
650 if (phba->sli_rev == LPFC_SLI_REV4) 650 if (phba->sli_rev == LPFC_SLI_REV4) {
651 lpfc_sli4_unreg_all_rpis(vport); 651 lpfc_sli4_unreg_all_rpis(vport);
652
653 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
654 lpfc_mbx_unreg_vpi(vport); 652 lpfc_mbx_unreg_vpi(vport);
655 spin_lock_irq(shost->host_lock); 653 spin_lock_irq(shost->host_lock);
656 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 654 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
657 spin_unlock_irq(shost->host_lock); 655 /*
658 } 656 * If VPI is unreged, driver need to do INIT_VPI
659 /* 657 * before re-registering
660 * If VPI is unreged, driver need to do INIT_VPI 658 */
661 * before re-registering
662 */
663 if (phba->sli_rev == LPFC_SLI_REV4) {
664 spin_lock_irq(shost->host_lock);
665 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 659 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
666 spin_unlock_irq(shost->host_lock); 660 spin_unlock_irq(shost->host_lock);
667 } 661 }
@@ -880,6 +874,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
880 phba->fcf.current_rec.fcf_indx, 874 phba->fcf.current_rec.fcf_indx,
881 irsp->ulpStatus, irsp->un.ulpWord[4], 875 irsp->ulpStatus, irsp->un.ulpWord[4],
882 irsp->ulpTimeout); 876 irsp->ulpTimeout);
877 lpfc_sli4_set_fcf_flogi_fail(phba,
878 phba->fcf.current_rec.fcf_indx);
883 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 879 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
884 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 880 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
885 if (rc) 881 if (rc)
@@ -1096,11 +1092,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1096 /* Set the fcfi to the fcfi we registered with */ 1092 /* Set the fcfi to the fcfi we registered with */
1097 elsiocb->iocb.ulpContext = phba->fcf.fcfi; 1093 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1098 } 1094 }
1099 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1095 } else {
1100 sp->cmn.request_multiple_Nport = 1; 1096 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1101 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1097 sp->cmn.request_multiple_Nport = 1;
1102 icmd->ulpCt_h = 1; 1098 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1103 icmd->ulpCt_l = 0; 1099 icmd->ulpCt_h = 1;
1100 icmd->ulpCt_l = 0;
1101 } else
1102 sp->cmn.request_multiple_Nport = 0;
1104 } 1103 }
1105 1104
1106 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1105 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
@@ -3656,7 +3655,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3656 } 3655 }
3657 3656
3658 icmd = &elsiocb->iocb; 3657 icmd = &elsiocb->iocb;
3659 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3658 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3659 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3660 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3660 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3661 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3661 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3662 pcmd += sizeof(uint32_t); 3662 pcmd += sizeof(uint32_t);
@@ -3673,7 +3673,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3673 return 1; 3673 return 1;
3674 3674
3675 icmd = &elsiocb->iocb; 3675 icmd = &elsiocb->iocb;
3676 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3676 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3677 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3677 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3678 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3678 3679
3679 if (mbox) 3680 if (mbox)
@@ -3695,7 +3696,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3695 return 1; 3696 return 1;
3696 3697
3697 icmd = &elsiocb->iocb; 3698 icmd = &elsiocb->iocb;
3698 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3699 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3700 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3699 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3701 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3700 3702
3701 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 3703 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
@@ -3781,7 +3783,8 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3781 3783
3782 icmd = &elsiocb->iocb; 3784 icmd = &elsiocb->iocb;
3783 oldcmd = &oldiocb->iocb; 3785 oldcmd = &oldiocb->iocb;
3784 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3786 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3787 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3785 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3788 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3786 3789
3787 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 3790 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
@@ -3853,7 +3856,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3853 3856
3854 icmd = &elsiocb->iocb; 3857 icmd = &elsiocb->iocb;
3855 oldcmd = &oldiocb->iocb; 3858 oldcmd = &oldiocb->iocb;
3856 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3859 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3860 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3857 3861
3858 /* Xmit ADISC ACC response tag <ulpIoTag> */ 3862 /* Xmit ADISC ACC response tag <ulpIoTag> */
3859 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3863 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -3931,7 +3935,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3931 3935
3932 icmd = &elsiocb->iocb; 3936 icmd = &elsiocb->iocb;
3933 oldcmd = &oldiocb->iocb; 3937 oldcmd = &oldiocb->iocb;
3934 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3938 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3939 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3940
3935 /* Xmit PRLI ACC response tag <ulpIoTag> */ 3941 /* Xmit PRLI ACC response tag <ulpIoTag> */
3936 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3942 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3937 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 3943 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
@@ -4035,7 +4041,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
4035 4041
4036 icmd = &elsiocb->iocb; 4042 icmd = &elsiocb->iocb;
4037 oldcmd = &oldiocb->iocb; 4043 oldcmd = &oldiocb->iocb;
4038 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 4044 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4045 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4046
4039 /* Xmit RNID ACC response tag <ulpIoTag> */ 4047 /* Xmit RNID ACC response tag <ulpIoTag> */
4040 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4048 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4041 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 4049 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
@@ -4163,7 +4171,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4163 if (!elsiocb) 4171 if (!elsiocb)
4164 return 1; 4172 return 1;
4165 4173
4166 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */ 4174 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
4175 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4176
4167 /* Xmit ECHO ACC response tag <ulpIoTag> */ 4177 /* Xmit ECHO ACC response tag <ulpIoTag> */
4168 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4178 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4169 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 4179 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
@@ -5054,13 +5064,15 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5054 uint8_t *pcmd; 5064 uint8_t *pcmd;
5055 struct lpfc_iocbq *elsiocb; 5065 struct lpfc_iocbq *elsiocb;
5056 struct lpfc_nodelist *ndlp; 5066 struct lpfc_nodelist *ndlp;
5057 uint16_t xri; 5067 uint16_t oxid;
5068 uint16_t rxid;
5058 uint32_t cmdsize; 5069 uint32_t cmdsize;
5059 5070
5060 mb = &pmb->u.mb; 5071 mb = &pmb->u.mb;
5061 5072
5062 ndlp = (struct lpfc_nodelist *) pmb->context2; 5073 ndlp = (struct lpfc_nodelist *) pmb->context2;
5063 xri = (uint16_t) ((unsigned long)(pmb->context1)); 5074 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5075 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5064 pmb->context1 = NULL; 5076 pmb->context1 = NULL;
5065 pmb->context2 = NULL; 5077 pmb->context2 = NULL;
5066 5078
@@ -5082,7 +5094,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5082 return; 5094 return;
5083 5095
5084 icmd = &elsiocb->iocb; 5096 icmd = &elsiocb->iocb;
5085 icmd->ulpContext = xri; 5097 icmd->ulpContext = rxid;
5098 icmd->unsli3.rcvsli3.ox_id = oxid;
5086 5099
5087 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5100 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5088 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5101 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5137,13 +5150,16 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5137 uint8_t *pcmd; 5150 uint8_t *pcmd;
5138 struct lpfc_iocbq *elsiocb; 5151 struct lpfc_iocbq *elsiocb;
5139 struct lpfc_nodelist *ndlp; 5152 struct lpfc_nodelist *ndlp;
5140 uint16_t xri, status; 5153 uint16_t status;
5154 uint16_t oxid;
5155 uint16_t rxid;
5141 uint32_t cmdsize; 5156 uint32_t cmdsize;
5142 5157
5143 mb = &pmb->u.mb; 5158 mb = &pmb->u.mb;
5144 5159
5145 ndlp = (struct lpfc_nodelist *) pmb->context2; 5160 ndlp = (struct lpfc_nodelist *) pmb->context2;
5146 xri = (uint16_t) ((unsigned long)(pmb->context1)); 5161 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5162 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5147 pmb->context1 = NULL; 5163 pmb->context1 = NULL;
5148 pmb->context2 = NULL; 5164 pmb->context2 = NULL;
5149 5165
@@ -5165,7 +5181,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5165 return; 5181 return;
5166 5182
5167 icmd = &elsiocb->iocb; 5183 icmd = &elsiocb->iocb;
5168 icmd->ulpContext = xri; 5184 icmd->ulpContext = rxid;
5185 icmd->unsli3.rcvsli3.ox_id = oxid;
5169 5186
5170 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5187 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5171 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5188 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5238,8 +5255,9 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5255 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5239 if (mbox) { 5256 if (mbox) {
5240 lpfc_read_lnk_stat(phba, mbox); 5257 lpfc_read_lnk_stat(phba, mbox);
5241 mbox->context1 = 5258 mbox->context1 = (void *)((unsigned long)
5242 (void *)((unsigned long) cmdiocb->iocb.ulpContext); 5259 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5260 cmdiocb->iocb.ulpContext)); /* rx_id */
5243 mbox->context2 = lpfc_nlp_get(ndlp); 5261 mbox->context2 = lpfc_nlp_get(ndlp);
5244 mbox->vport = vport; 5262 mbox->vport = vport;
5245 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 5263 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
@@ -5314,7 +5332,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5314 pcmd += sizeof(uint32_t); /* Skip past command */ 5332 pcmd += sizeof(uint32_t); /* Skip past command */
5315 5333
5316 /* use the command's xri in the response */ 5334 /* use the command's xri in the response */
5317 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; 5335 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
5336 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5318 5337
5319 rtv_rsp = (struct RTV_RSP *)pcmd; 5338 rtv_rsp = (struct RTV_RSP *)pcmd;
5320 5339
@@ -5399,8 +5418,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5399 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5418 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5400 if (mbox) { 5419 if (mbox) {
5401 lpfc_read_lnk_stat(phba, mbox); 5420 lpfc_read_lnk_stat(phba, mbox);
5402 mbox->context1 = 5421 mbox->context1 = (void *)((unsigned long)
5403 (void *)((unsigned long) cmdiocb->iocb.ulpContext); 5422 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5423 cmdiocb->iocb.ulpContext)); /* rx_id */
5404 mbox->context2 = lpfc_nlp_get(ndlp); 5424 mbox->context2 = lpfc_nlp_get(ndlp);
5405 mbox->vport = vport; 5425 mbox->vport = vport;
5406 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 5426 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
@@ -5554,7 +5574,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
5554 5574
5555 icmd = &elsiocb->iocb; 5575 icmd = &elsiocb->iocb;
5556 oldcmd = &oldiocb->iocb; 5576 oldcmd = &oldiocb->iocb;
5557 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 5577 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5578 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5558 5579
5559 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5580 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5560 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5581 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -6586,7 +6607,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6586{ 6607{
6587 struct lpfc_vport *vport; 6608 struct lpfc_vport *vport;
6588 unsigned long flags; 6609 unsigned long flags;
6589 int i; 6610 int i = 0;
6590 6611
6591 /* The physical ports are always vpi 0 - translate is unnecessary. */ 6612 /* The physical ports are always vpi 0 - translate is unnecessary. */
6592 if (vpi > 0) { 6613 if (vpi > 0) {
@@ -6609,7 +6630,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6609 6630
6610 spin_lock_irqsave(&phba->hbalock, flags); 6631 spin_lock_irqsave(&phba->hbalock, flags);
6611 list_for_each_entry(vport, &phba->port_list, listentry) { 6632 list_for_each_entry(vport, &phba->port_list, listentry) {
6612 if (vport->vpi == vpi) { 6633 if (vport->vpi == i) {
6613 spin_unlock_irqrestore(&phba->hbalock, flags); 6634 spin_unlock_irqrestore(&phba->hbalock, flags);
6614 return vport; 6635 return vport;
6615 } 6636 }
@@ -7787,6 +7808,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7787{ 7808{
7788 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 7809 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
7789 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 7810 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
7811 uint16_t lxri = 0;
7790 7812
7791 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7813 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7792 unsigned long iflag = 0; 7814 unsigned long iflag = 0;
@@ -7815,7 +7837,12 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7815 } 7837 }
7816 } 7838 }
7817 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7839 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7818 sglq_entry = __lpfc_get_active_sglq(phba, xri); 7840 lxri = lpfc_sli4_xri_inrange(phba, xri);
7841 if (lxri == NO_XRI) {
7842 spin_unlock_irqrestore(&phba->hbalock, iflag);
7843 return;
7844 }
7845 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
7819 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 7846 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
7820 spin_unlock_irqrestore(&phba->hbalock, iflag); 7847 spin_unlock_irqrestore(&phba->hbalock, iflag);
7821 return; 7848 return;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 18d0dbfda2bc..0b47adf9fee8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1109,6 +1109,28 @@ out:
1109 return; 1109 return;
1110} 1110}
1111 1111
1112/**
1113 * lpfc_sli4_clear_fcf_rr_bmask
1114 * @phba pointer to the struct lpfc_hba for this port.
1115 * This fucnction resets the round robin bit mask and clears the
1116 * fcf priority list. The list deletions are done while holding the
1117 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
1118 * from the lpfc_fcf_pri record.
1119 **/
1120void
1121lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
1122{
1123 struct lpfc_fcf_pri *fcf_pri;
1124 struct lpfc_fcf_pri *next_fcf_pri;
1125 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
1126 spin_lock_irq(&phba->hbalock);
1127 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
1128 &phba->fcf.fcf_pri_list, list) {
1129 list_del_init(&fcf_pri->list);
1130 fcf_pri->fcf_rec.flag = 0;
1131 }
1132 spin_unlock_irq(&phba->hbalock);
1133}
1112static void 1134static void
1113lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1135lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1114{ 1136{
@@ -1130,7 +1152,8 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1130 spin_unlock_irq(&phba->hbalock); 1152 spin_unlock_irq(&phba->hbalock);
1131 1153
1132 /* If there is a pending FCoE event, restart FCF table scan. */ 1154 /* If there is a pending FCoE event, restart FCF table scan. */
1133 if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1155 if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
1156 lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
1134 goto fail_out; 1157 goto fail_out;
1135 1158
1136 /* Mark successful completion of FCF table scan */ 1159 /* Mark successful completion of FCF table scan */
@@ -1250,6 +1273,30 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1250} 1273}
1251 1274
1252/** 1275/**
1276 * lpfc_update_fcf_record - Update driver fcf record
1277 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
1278 * @phba: pointer to lpfc hba data structure.
1279 * @fcf_index: Index for the lpfc_fcf_record.
1280 * @new_fcf_record: pointer to hba fcf record.
1281 *
1282 * This routine updates the driver FCF priority record from the new HBA FCF
1283 * record. This routine is called with the host lock held.
1284 **/
1285static void
1286__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
1287 struct fcf_record *new_fcf_record
1288 )
1289{
1290 struct lpfc_fcf_pri *fcf_pri;
1291
1292 fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1293 fcf_pri->fcf_rec.fcf_index = fcf_index;
1294 /* FCF record priority */
1295 fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
1296
1297}
1298
1299/**
1253 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. 1300 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1254 * @fcf: pointer to driver fcf record. 1301 * @fcf: pointer to driver fcf record.
1255 * @new_fcf_record: pointer to fcf record. 1302 * @new_fcf_record: pointer to fcf record.
@@ -1332,6 +1379,9 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1332 fcf_rec->addr_mode = addr_mode; 1379 fcf_rec->addr_mode = addr_mode;
1333 fcf_rec->vlan_id = vlan_id; 1380 fcf_rec->vlan_id = vlan_id;
1334 fcf_rec->flag |= (flag | RECORD_VALID); 1381 fcf_rec->flag |= (flag | RECORD_VALID);
1382 __lpfc_update_fcf_record_pri(phba,
1383 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
1384 new_fcf_record);
1335} 1385}
1336 1386
1337/** 1387/**
@@ -1834,6 +1884,8 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1834 return false; 1884 return false;
1835 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) 1885 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
1836 return false; 1886 return false;
1887 if (fcf_rec->priority != new_fcf_record->fip_priority)
1888 return false;
1837 return true; 1889 return true;
1838} 1890}
1839 1891
@@ -1897,6 +1949,152 @@ stop_flogi_current_fcf:
1897} 1949}
1898 1950
1899/** 1951/**
 1952 * lpfc_sli4_fcf_pri_list_del
 1953 * @phba: pointer to lpfc hba data structure.
 1954 * @fcf_index: the index of the fcf record to delete
 1955 * This routine checks the on-list flag of the fcf_index to be deleted.
 1956 * If it is on the list then it is removed from the list, and the flag
 1957 * is cleared. This routine grabs the hbalock before removing the fcf
 1958 * record from the list.
 1959 **/
 1960static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
 1961			uint16_t fcf_index)
 1962{
 1963	struct lpfc_fcf_pri *new_fcf_pri;
 1964
 1965	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
 1966	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 1967		"3058 deleting idx x%x pri x%x flg x%x\n",
 1968		fcf_index, new_fcf_pri->fcf_rec.priority,
 1969		 new_fcf_pri->fcf_rec.flag);
 1970	spin_lock_irq(&phba->hbalock);
 1971	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		/*
		 * The eligible count tracks entries at the in-use priority
		 * level only, so decrement it just when priorities match.
		 */
 1972		if (phba->fcf.current_rec.priority ==
 1973				new_fcf_pri->fcf_rec.priority)
 1974			phba->fcf.eligible_fcf_cnt--;
 1975		list_del_init(&new_fcf_pri->list);
 1976		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
 1977	}
 1978	spin_unlock_irq(&phba->hbalock);
 1979}
1980
 1981/**
 1982 * lpfc_sli4_set_fcf_flogi_fail
 1983 * @phba: pointer to lpfc hba data structure.
 1984 * @fcf_index: the index of the fcf record to update
 1985 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 1986 * flag so the round robin selection for the particular priority level
 1987 * will try a different fcf record that does not have this bit set.
 1988 * If the fcf record is re-read for any reason this flag is cleared before
 1989 * adding it to the priority list.
 1990 **/
 1991void
 1992lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
 1993{
 1994	struct lpfc_fcf_pri *new_fcf_pri;
 1995	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
 1996	spin_lock_irq(&phba->hbalock);
 1997	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
 1998	spin_unlock_irq(&phba->hbalock);
 1999}
2000
 2001/**
 2002 * lpfc_sli4_fcf_pri_list_add
 2003 * @phba: pointer to lpfc hba data structure.
 2004 * @fcf_index: the index of the fcf record to add
 2005 * @new_fcf_record: pointer to the hba fcf record read from the port.
 2006 * This routine checks the priority of the fcf_index to be added.
 2007 * If it is a lower priority than the current head of the fcf_pri list
 2008 * then it is added to the list in the right order.
 2009 * If it is the same priority as the current head of the list then it
 2010 * is added to the head of the list and its bit in the rr_bmask is set.
 2011 * If the fcf_index to be added is of a higher priority than the current
 2012 * head of the list then the rr_bmask is cleared, its bit is set in the
 2013 * rr_bmask and it is added to the head of the list.
 2014 * returns:
 2015 * 0=success 1=failure
 2016 **/
 2016int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
 2017	struct fcf_record *new_fcf_record)
 2018{
 2019	uint16_t current_fcf_pri;
 2020	uint16_t last_index;
 2021	struct lpfc_fcf_pri *fcf_pri;
 2022	struct lpfc_fcf_pri *next_fcf_pri;
 2023	struct lpfc_fcf_pri *new_fcf_pri;
 2024	int ret;
 2025
 2026	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
 2027	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 2028		"3059 adding idx x%x pri x%x flg x%x\n",
 2029		fcf_index, new_fcf_record->fip_priority,
 2030		 new_fcf_pri->fcf_rec.flag);
 2031	spin_lock_irq(&phba->hbalock);
	/* Re-reading a record: unlink the stale entry before re-inserting. */
 2032	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
 2033		list_del_init(&new_fcf_pri->list);
 2034	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
 2035	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	/* First entry: seed both the priority list and the rr bmask. */
 2036	if (list_empty(&phba->fcf.fcf_pri_list)) {
 2037		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
 2038		ret = lpfc_sli4_fcf_rr_index_set(phba,
 2039				 new_fcf_pri->fcf_rec.fcf_index);
 2040		goto out;
 2041	}
 2042
	/*
	 * Any index currently set in the rr bmask belongs to the active
	 * priority level, so use it to look up that level's priority.
	 */
 2043	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
 2044				LPFC_SLI4_FCF_TBL_INDX_MAX);
 2045	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
 2046		ret = 0; /* Empty rr list */
 2047		goto out;
 2048	}
 2049	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	/* Equal or better (numerically lower) priority goes to the head. */
 2050	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
 2051		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
 2052		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			/* Strictly better: restart round robin at this level. */
 2053			memset(phba->fcf.fcf_rr_bmask, 0,
 2054				sizeof(*phba->fcf.fcf_rr_bmask));
 2055			/* fcfs_at_this_priority_level = 1; */
 2056			phba->fcf.eligible_fcf_cnt = 1;
 2057		} else
 2058			/* fcfs_at_this_priority_level++; */
 2059			phba->fcf.eligible_fcf_cnt++;
 2060		ret = lpfc_sli4_fcf_rr_index_set(phba,
 2061				new_fcf_pri->fcf_rec.fcf_index);
 2062		goto out;
 2063	}
 2064
	/*
	 * Worse priority than the head: walk the (priority-sorted) list to
	 * find the insertion point; no rr bmask update for inactive levels.
	 */
 2065	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
 2066				&phba->fcf.fcf_pri_list, list) {
 2067		if (new_fcf_pri->fcf_rec.priority <=
 2068				fcf_pri->fcf_rec.priority) {
 2069			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
 2070				list_add(&new_fcf_pri->list,
 2071						&phba->fcf.fcf_pri_list);
 2072			else
				/*
				 * NOTE(review): cast assumes 'list' is the
				 * first member of struct lpfc_fcf_pri;
				 * container_of/list_entry would be safer —
				 * confirm against the struct definition.
				 */
 2073				list_add(&new_fcf_pri->list,
 2074					 &((struct lpfc_fcf_pri *)
 2075					fcf_pri->list.prev)->list);
 2076			ret = 0;
 2077			goto out;
 2078		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
 2079			|| new_fcf_pri->fcf_rec.priority <
 2080				next_fcf_pri->fcf_rec.priority) {
 2081			list_add(&new_fcf_pri->list, &fcf_pri->list);
 2082			ret = 0;
 2083			goto out;
 2084		}
 2085		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
 2086			continue;
 2087
 2088	}
 2089	ret = 1;
 2090out:
 2091	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
 2092	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
 2093	spin_unlock_irq(&phba->hbalock);
 2094	return ret;
 2095}
2096
2097/**
1900 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 2098 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1901 * @phba: pointer to lpfc hba data structure. 2099 * @phba: pointer to lpfc hba data structure.
1902 * @mboxq: pointer to mailbox object. 2100 * @mboxq: pointer to mailbox object.
@@ -1958,6 +2156,9 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1958 * record for roundrobin FCF failover. 2156 * record for roundrobin FCF failover.
1959 */ 2157 */
1960 if (!rc) { 2158 if (!rc) {
2159 lpfc_sli4_fcf_pri_list_del(phba,
2160 bf_get(lpfc_fcf_record_fcf_index,
2161 new_fcf_record));
1961 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2162 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1962 "2781 FCF (x%x) failed connection " 2163 "2781 FCF (x%x) failed connection "
1963 "list check: (x%x/x%x)\n", 2164 "list check: (x%x/x%x)\n",
@@ -2005,7 +2206,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2005 goto read_next_fcf; 2206 goto read_next_fcf;
2006 } else { 2207 } else {
2007 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2208 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2008 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 2209 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2210 new_fcf_record);
2009 if (rc) 2211 if (rc)
2010 goto read_next_fcf; 2212 goto read_next_fcf;
2011 } 2213 }
@@ -2018,7 +2220,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2018 */ 2220 */
2019 spin_lock_irq(&phba->hbalock); 2221 spin_lock_irq(&phba->hbalock);
2020 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2222 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2021 if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2223 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2224 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2022 new_fcf_record, vlan_id)) { 2225 new_fcf_record, vlan_id)) {
2023 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == 2226 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2024 phba->fcf.current_rec.fcf_indx) { 2227 phba->fcf.current_rec.fcf_indx) {
@@ -2232,7 +2435,8 @@ read_next_fcf:
2232 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) 2435 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2233 return; 2436 return;
2234 2437
2235 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2438 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2439 phba->fcf.fcf_flag & FCF_IN_USE) {
2236 /* 2440 /*
2237 * In case the current in-use FCF record no 2441 * In case the current in-use FCF record no
2238 * longer existed during FCF discovery that 2442 * longer existed during FCF discovery that
@@ -2247,7 +2451,6 @@ read_next_fcf:
2247 spin_lock_irq(&phba->hbalock); 2451 spin_lock_irq(&phba->hbalock);
2248 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2452 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2249 spin_unlock_irq(&phba->hbalock); 2453 spin_unlock_irq(&phba->hbalock);
2250 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2251 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2454 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2252 LPFC_FCOE_FCF_GET_FIRST); 2455 LPFC_FCOE_FCF_GET_FIRST);
2253 return; 2456 return;
@@ -2424,7 +2627,8 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2424 2627
2425 /* Update the eligible FCF record index bmask */ 2628 /* Update the eligible FCF record index bmask */
2426 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2629 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2427 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 2630
2631 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2428 2632
2429out: 2633out:
2430 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2634 lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -2645,6 +2849,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2645 vport->vpi_state |= LPFC_VPI_REGISTERED; 2849 vport->vpi_state |= LPFC_VPI_REGISTERED;
2646 vport->fc_flag |= FC_VFI_REGISTERED; 2850 vport->fc_flag |= FC_VFI_REGISTERED;
2647 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2851 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2852 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2648 spin_unlock_irq(shost->host_lock); 2853 spin_unlock_irq(shost->host_lock);
2649 2854
2650 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2855 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
@@ -2893,8 +3098,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2893 goto out; 3098 goto out;
2894 } 3099 }
2895 /* Reset FCF roundrobin bmask for new discovery */ 3100 /* Reset FCF roundrobin bmask for new discovery */
2896 memset(phba->fcf.fcf_rr_bmask, 0, 3101 lpfc_sli4_clear_fcf_rr_bmask(phba);
2897 sizeof(*phba->fcf.fcf_rr_bmask));
2898 } 3102 }
2899 3103
2900 return; 3104 return;
@@ -5592,7 +5796,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
5592 spin_unlock_irq(&phba->hbalock); 5796 spin_unlock_irq(&phba->hbalock);
5593 5797
5594 /* Reset FCF roundrobin bmask for new discovery */ 5798 /* Reset FCF roundrobin bmask for new discovery */
5595 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); 5799 lpfc_sli4_clear_fcf_rr_bmask(phba);
5596 5800
5597 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5801 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5598 5802
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ab4c4d651d0c..046edc4ab35f 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3470,11 +3470,16 @@ typedef struct {
3470 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */ 3470 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
3471 3471
3472struct rcv_sli3 { 3472struct rcv_sli3 {
3473 uint32_t word8Rsvd;
3474#ifdef __BIG_ENDIAN_BITFIELD 3473#ifdef __BIG_ENDIAN_BITFIELD
3474 uint16_t ox_id;
3475 uint16_t seq_cnt;
3476
3475 uint16_t vpi; 3477 uint16_t vpi;
3476 uint16_t word9Rsvd; 3478 uint16_t word9Rsvd;
3477#else /* __LITTLE_ENDIAN */ 3479#else /* __LITTLE_ENDIAN */
3480 uint16_t seq_cnt;
3481 uint16_t ox_id;
3482
3478 uint16_t word9Rsvd; 3483 uint16_t word9Rsvd;
3479 uint16_t vpi; 3484 uint16_t vpi;
3480#endif 3485#endif
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 11e26a26b5d1..7f8003b5181e 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,15 +170,8 @@ struct lpfc_sli_intf {
170#define LPFC_PCI_FUNC3 3 170#define LPFC_PCI_FUNC3 3
171#define LPFC_PCI_FUNC4 4 171#define LPFC_PCI_FUNC4 4
172 172
173/* SLI4 interface type-2 control register offsets */ 173/* SLI4 interface type-2 PDEV_CTL register */
174#define LPFC_CTL_PORT_SEM_OFFSET 0x400
175#define LPFC_CTL_PORT_STA_OFFSET 0x404
176#define LPFC_CTL_PORT_CTL_OFFSET 0x408
177#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
178#define LPFC_CTL_PORT_ER2_OFFSET 0x410
179#define LPFC_CTL_PDEV_CTL_OFFSET 0x414 174#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
180
181/* Some SLI4 interface type-2 PDEV_CTL register bits */
182#define LPFC_CTL_PDEV_CTL_DRST 0x00000001 175#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
183#define LPFC_CTL_PDEV_CTL_FRST 0x00000002 176#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
184#define LPFC_CTL_PDEV_CTL_DD 0x00000004 177#define LPFC_CTL_PDEV_CTL_DD 0x00000004
@@ -337,6 +330,7 @@ struct lpfc_cqe {
337#define CQE_CODE_RELEASE_WQE 0x2 330#define CQE_CODE_RELEASE_WQE 0x2
338#define CQE_CODE_RECEIVE 0x4 331#define CQE_CODE_RECEIVE 0x4
339#define CQE_CODE_XRI_ABORTED 0x5 332#define CQE_CODE_XRI_ABORTED 0x5
333#define CQE_CODE_RECEIVE_V1 0x9
340 334
341/* completion queue entry for wqe completions */ 335/* completion queue entry for wqe completions */
342struct lpfc_wcqe_complete { 336struct lpfc_wcqe_complete {
@@ -440,7 +434,10 @@ struct lpfc_rcqe {
440#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ 434#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
441#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ 435#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
442#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ 436#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
443 uint32_t reserved1; 437 uint32_t word1;
438#define lpfc_rcqe_fcf_id_v1_SHIFT 0
439#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
440#define lpfc_rcqe_fcf_id_v1_WORD word1
444 uint32_t word2; 441 uint32_t word2;
445#define lpfc_rcqe_length_SHIFT 16 442#define lpfc_rcqe_length_SHIFT 16
446#define lpfc_rcqe_length_MASK 0x0000FFFF 443#define lpfc_rcqe_length_MASK 0x0000FFFF
@@ -451,6 +448,9 @@ struct lpfc_rcqe {
451#define lpfc_rcqe_fcf_id_SHIFT 0 448#define lpfc_rcqe_fcf_id_SHIFT 0
452#define lpfc_rcqe_fcf_id_MASK 0x0000003F 449#define lpfc_rcqe_fcf_id_MASK 0x0000003F
453#define lpfc_rcqe_fcf_id_WORD word2 450#define lpfc_rcqe_fcf_id_WORD word2
451#define lpfc_rcqe_rq_id_v1_SHIFT 0
452#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF
453#define lpfc_rcqe_rq_id_v1_WORD word2
454 uint32_t word3; 454 uint32_t word3;
455#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT 455#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
456#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK 456#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
@@ -515,7 +515,7 @@ struct lpfc_register {
515/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */ 515/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
516#define LPFC_SLI_INTF 0x0058 516#define LPFC_SLI_INTF 0x0058
517 517
518#define LPFC_SLIPORT_IF2_SMPHR 0x0400 518#define LPFC_CTL_PORT_SEM_OFFSET 0x400
519#define lpfc_port_smphr_perr_SHIFT 31 519#define lpfc_port_smphr_perr_SHIFT 31
520#define lpfc_port_smphr_perr_MASK 0x1 520#define lpfc_port_smphr_perr_MASK 0x1
521#define lpfc_port_smphr_perr_WORD word0 521#define lpfc_port_smphr_perr_WORD word0
@@ -575,7 +575,7 @@ struct lpfc_register {
575#define LPFC_POST_STAGE_PORT_READY 0xC000 575#define LPFC_POST_STAGE_PORT_READY 0xC000
576#define LPFC_POST_STAGE_PORT_UE 0xF000 576#define LPFC_POST_STAGE_PORT_UE 0xF000
577 577
578#define LPFC_SLIPORT_STATUS 0x0404 578#define LPFC_CTL_PORT_STA_OFFSET 0x404
579#define lpfc_sliport_status_err_SHIFT 31 579#define lpfc_sliport_status_err_SHIFT 31
580#define lpfc_sliport_status_err_MASK 0x1 580#define lpfc_sliport_status_err_MASK 0x1
581#define lpfc_sliport_status_err_WORD word0 581#define lpfc_sliport_status_err_WORD word0
@@ -593,7 +593,7 @@ struct lpfc_register {
593#define lpfc_sliport_status_rdy_WORD word0 593#define lpfc_sliport_status_rdy_WORD word0
594#define MAX_IF_TYPE_2_RESETS 1000 594#define MAX_IF_TYPE_2_RESETS 1000
595 595
596#define LPFC_SLIPORT_CNTRL 0x0408 596#define LPFC_CTL_PORT_CTL_OFFSET 0x408
597#define lpfc_sliport_ctrl_end_SHIFT 30 597#define lpfc_sliport_ctrl_end_SHIFT 30
598#define lpfc_sliport_ctrl_end_MASK 0x1 598#define lpfc_sliport_ctrl_end_MASK 0x1
599#define lpfc_sliport_ctrl_end_WORD word0 599#define lpfc_sliport_ctrl_end_WORD word0
@@ -604,8 +604,8 @@ struct lpfc_register {
604#define lpfc_sliport_ctrl_ip_WORD word0 604#define lpfc_sliport_ctrl_ip_WORD word0
605#define LPFC_SLIPORT_INIT_PORT 1 605#define LPFC_SLIPORT_INIT_PORT 1
606 606
607#define LPFC_SLIPORT_ERR_1 0x040C 607#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
608#define LPFC_SLIPORT_ERR_2 0x0410 608#define LPFC_CTL_PORT_ER2_OFFSET 0x410
609 609
610/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically 610/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
611 * reside in BAR 2. 611 * reside in BAR 2.
@@ -3198,6 +3198,8 @@ struct lpfc_grp_hdr {
3198#define lpfc_grp_hdr_id_MASK 0x000000FF 3198#define lpfc_grp_hdr_id_MASK 0x000000FF
3199#define lpfc_grp_hdr_id_WORD word2 3199#define lpfc_grp_hdr_id_WORD word2
3200 uint8_t rev_name[128]; 3200 uint8_t rev_name[128];
3201 uint8_t date[12];
3202 uint8_t revision[32];
3201}; 3203};
3202 3204
3203#define FCP_COMMAND 0x0 3205#define FCP_COMMAND 0x0
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 148b98ddbb1d..a3c820083c36 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2927,6 +2927,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2927 sizeof fc_host_symbolic_name(shost)); 2927 sizeof fc_host_symbolic_name(shost));
2928 2928
2929 fc_host_supported_speeds(shost) = 0; 2929 fc_host_supported_speeds(shost) = 0;
2930 if (phba->lmt & LMT_16Gb)
2931 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
2930 if (phba->lmt & LMT_10Gb) 2932 if (phba->lmt & LMT_10Gb)
2931 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2933 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2932 if (phba->lmt & LMT_8Gb) 2934 if (phba->lmt & LMT_8Gb)
@@ -3632,8 +3634,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3632 lpfc_sli4_fcf_dead_failthrough(phba); 3634 lpfc_sli4_fcf_dead_failthrough(phba);
3633 } else { 3635 } else {
3634 /* Reset FCF roundrobin bmask for new discovery */ 3636 /* Reset FCF roundrobin bmask for new discovery */
3635 memset(phba->fcf.fcf_rr_bmask, 0, 3637 lpfc_sli4_clear_fcf_rr_bmask(phba);
3636 sizeof(*phba->fcf.fcf_rr_bmask));
3637 /* 3638 /*
3638 * Handling fast FCF failover to a DEAD FCF event is 3639 * Handling fast FCF failover to a DEAD FCF event is
 3639 * considered equivalent to receiving CVL to all vports. 3640
@@ -3647,7 +3648,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3647 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3648 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3648 3649
3649 vport = lpfc_find_vport_by_vpid(phba, 3650 vport = lpfc_find_vport_by_vpid(phba,
3650 acqe_fip->index - phba->vpi_base); 3651 acqe_fip->index);
3651 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3652 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3652 if (!ndlp) 3653 if (!ndlp)
3653 break; 3654 break;
@@ -3719,8 +3720,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3719 * Reset FCF roundrobin bmask for new 3720 * Reset FCF roundrobin bmask for new
3720 * discovery. 3721 * discovery.
3721 */ 3722 */
3722 memset(phba->fcf.fcf_rr_bmask, 0, 3723 lpfc_sli4_clear_fcf_rr_bmask(phba);
3723 sizeof(*phba->fcf.fcf_rr_bmask));
3724 } 3724 }
3725 break; 3725 break;
3726 default: 3726 default:
@@ -4035,6 +4035,34 @@ lpfc_reset_hba(struct lpfc_hba *phba)
4035} 4035}
4036 4036
4037/** 4037/**
 4038 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 4039 * @phba: pointer to lpfc hba data structure.
 4040 *
 4041 * This function reads the TotalVFs field from the device's PCI SR-IOV
 4042 * extended capability to report the maximum number of virtual functions
 4043 * the physical function supports.
 4044 *
 4045 * Returns the TotalVFs value, or 0 if the device is not a physical
 4046 * function or does not expose the SR-IOV extended capability.
 4047 **/
 4047uint16_t
 4048lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
 4049{
 4050	struct pci_dev *pdev = phba->pcidev;
 4051	uint16_t nr_virtfn;
 4052	int pos;
 4053
 4054	if (!pdev->is_physfn)
 4055		return 0;
 4056
 4057	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
 4058	if (pos == 0)
 4059		return 0;
 4060
 4061	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
 4062	return nr_virtfn;
 4063}
4064
4065/**
4038 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 4066 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4039 * @phba: pointer to lpfc hba data structure. 4067 * @phba: pointer to lpfc hba data structure.
4040 * @nr_vfn: number of virtual functions to be enabled. 4068 * @nr_vfn: number of virtual functions to be enabled.
@@ -4049,8 +4077,17 @@ int
4049lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 4077lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4050{ 4078{
4051 struct pci_dev *pdev = phba->pcidev; 4079 struct pci_dev *pdev = phba->pcidev;
4080 uint16_t max_nr_vfn;
4052 int rc; 4081 int rc;
4053 4082
4083 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4084 if (nr_vfn > max_nr_vfn) {
4085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4086 "3057 Requested vfs (%d) greater than "
4087 "supported vfs (%d)", nr_vfn, max_nr_vfn);
4088 return -EINVAL;
4089 }
4090
4054 rc = pci_enable_sriov(pdev, nr_vfn); 4091 rc = pci_enable_sriov(pdev, nr_vfn);
4055 if (rc) { 4092 if (rc) {
4056 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4093 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -4516,7 +4553,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4516 } 4553 }
4517 } 4554 }
4518 4555
4519 return rc; 4556 return 0;
4520 4557
4521out_free_fcp_eq_hdl: 4558out_free_fcp_eq_hdl:
4522 kfree(phba->sli4_hba.fcp_eq_hdl); 4559 kfree(phba->sli4_hba.fcp_eq_hdl);
@@ -4966,17 +5003,14 @@ out_free_mem:
4966 * @phba: pointer to lpfc hba data structure. 5003 * @phba: pointer to lpfc hba data structure.
4967 * 5004 *
4968 * This routine is invoked to post rpi header templates to the 5005 * This routine is invoked to post rpi header templates to the
4969 * HBA consistent with the SLI-4 interface spec. This routine 5006 * port for those SLI4 ports that do not support extents. This routine
4970 * posts a PAGE_SIZE memory region to the port to hold up to 5007 * posts a PAGE_SIZE memory region to the port to hold up to
4971 * PAGE_SIZE modulo 64 rpi context headers. 5008 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
4972 * No locks are held here because this is an initialization routine 5009 * and should be called only when interrupts are disabled.
4973 * called only from probe or lpfc_online when interrupts are not
4974 * enabled and the driver is reinitializing the device.
4975 * 5010 *
4976 * Return codes 5011 * Return codes
4977 * 0 - successful 5012 * 0 - successful
4978 * -ENOMEM - No available memory 5013 * -ERROR - otherwise.
4979 * -EIO - The mailbox failed to complete successfully.
4980 **/ 5014 **/
4981int 5015int
4982lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 5016lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
@@ -5687,17 +5721,22 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5687 break; 5721 break;
5688 case LPFC_SLI_INTF_IF_TYPE_2: 5722 case LPFC_SLI_INTF_IF_TYPE_2:
5689 phba->sli4_hba.u.if_type2.ERR1regaddr = 5723 phba->sli4_hba.u.if_type2.ERR1regaddr =
5690 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1; 5724 phba->sli4_hba.conf_regs_memmap_p +
5725 LPFC_CTL_PORT_ER1_OFFSET;
5691 phba->sli4_hba.u.if_type2.ERR2regaddr = 5726 phba->sli4_hba.u.if_type2.ERR2regaddr =
5692 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2; 5727 phba->sli4_hba.conf_regs_memmap_p +
5728 LPFC_CTL_PORT_ER2_OFFSET;
5693 phba->sli4_hba.u.if_type2.CTRLregaddr = 5729 phba->sli4_hba.u.if_type2.CTRLregaddr =
5694 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL; 5730 phba->sli4_hba.conf_regs_memmap_p +
5731 LPFC_CTL_PORT_CTL_OFFSET;
5695 phba->sli4_hba.u.if_type2.STATUSregaddr = 5732 phba->sli4_hba.u.if_type2.STATUSregaddr =
5696 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS; 5733 phba->sli4_hba.conf_regs_memmap_p +
5734 LPFC_CTL_PORT_STA_OFFSET;
5697 phba->sli4_hba.SLIINTFregaddr = 5735 phba->sli4_hba.SLIINTFregaddr =
5698 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5736 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5699 phba->sli4_hba.PSMPHRregaddr = 5737 phba->sli4_hba.PSMPHRregaddr =
5700 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR; 5738 phba->sli4_hba.conf_regs_memmap_p +
5739 LPFC_CTL_PORT_SEM_OFFSET;
5701 phba->sli4_hba.RQDBregaddr = 5740 phba->sli4_hba.RQDBregaddr =
5702 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 5741 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5703 phba->sli4_hba.WQDBregaddr = 5742 phba->sli4_hba.WQDBregaddr =
@@ -8859,11 +8898,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8859 return -EINVAL; 8898 return -EINVAL;
8860 } 8899 }
8861 lpfc_decode_firmware_rev(phba, fwrev, 1); 8900 lpfc_decode_firmware_rev(phba, fwrev, 1);
8862 if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) { 8901 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
8863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8864 "3023 Updating Firmware. Current Version:%s " 8903 "3023 Updating Firmware. Current Version:%s "
8865 "New Version:%s\n", 8904 "New Version:%s\n",
8866 fwrev, image->rev_name); 8905 fwrev, image->revision);
8867 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 8906 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8868 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 8907 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8869 GFP_KERNEL); 8908 GFP_KERNEL);
@@ -8892,9 +8931,9 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8892 fw->size - offset); 8931 fw->size - offset);
8893 break; 8932 break;
8894 } 8933 }
8895 temp_offset += SLI4_PAGE_SIZE;
8896 memcpy(dmabuf->virt, fw->data + temp_offset, 8934 memcpy(dmabuf->virt, fw->data + temp_offset,
8897 SLI4_PAGE_SIZE); 8935 SLI4_PAGE_SIZE);
8936 temp_offset += SLI4_PAGE_SIZE;
8898 } 8937 }
8899 rc = lpfc_wr_object(phba, &dma_buffer_list, 8938 rc = lpfc_wr_object(phba, &dma_buffer_list,
8900 (fw->size - offset), &offset); 8939 (fw->size - offset), &offset);
@@ -9005,6 +9044,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9005 } 9044 }
9006 9045
9007 INIT_LIST_HEAD(&phba->active_rrq_list); 9046 INIT_LIST_HEAD(&phba->active_rrq_list);
9047 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9008 9048
9009 /* Set up common device driver resources */ 9049 /* Set up common device driver resources */
9010 error = lpfc_setup_driver_resource_phase2(phba); 9050 error = lpfc_setup_driver_resource_phase2(phba);
@@ -9112,7 +9152,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9112 9152
9113 /* Check if there are static vports to be created. */ 9153 /* Check if there are static vports to be created. */
9114 lpfc_create_static_vport(phba); 9154 lpfc_create_static_vport(phba);
9115
9116 return 0; 9155 return 0;
9117 9156
9118out_disable_intr: 9157out_disable_intr:
@@ -9483,6 +9522,13 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9483 } 9522 }
9484 9523
9485 pci_restore_state(pdev); 9524 pci_restore_state(pdev);
9525
9526 /*
9527 * As the new kernel behavior of pci_restore_state() API call clears
9528 * device saved_state flag, need to save the restored state again.
9529 */
9530 pci_save_state(pdev);
9531
9486 if (pdev->is_busmaster) 9532 if (pdev->is_busmaster)
9487 pci_set_master(pdev); 9533 pci_set_master(pdev);
9488 9534
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 556767028353..83450cc5c4d3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2031,7 +2031,7 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1); 2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
2032 bf_set(lpfc_init_vfi_vfi, init_vfi, 2032 bf_set(lpfc_init_vfi_vfi, init_vfi,
2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]); 2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2034 bf_set(lpfc_init_vpi_vpi, init_vfi, 2034 bf_set(lpfc_init_vfi_vpi, init_vfi,
2035 vport->phba->vpi_ids[vport->vpi]); 2035 vport->phba->vpi_ids[vport->vpi]);
2036 bf_set(lpfc_init_vfi_fcfi, init_vfi, 2036 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2037 vport->phba->fcf.fcfi); 2037 vport->phba->fcf.fcfi);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3ccc97496ebf..eadd241eeff1 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1302,13 +1302,13 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1302 case SCSI_PROT_NORMAL: 1302 case SCSI_PROT_NORMAL:
1303 default: 1303 default:
1304 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1304 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1305 "9063 BLKGRD: Bad op/guard:%d/%d combination\n", 1305 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1306 scsi_get_prot_op(sc), guard_type); 1306 scsi_get_prot_op(sc));
1307 ret = 1; 1307 ret = 1;
1308 break; 1308 break;
1309 1309
1310 } 1310 }
1311 } else if (guard_type == SHOST_DIX_GUARD_CRC) { 1311 } else {
1312 switch (scsi_get_prot_op(sc)) { 1312 switch (scsi_get_prot_op(sc)) {
1313 case SCSI_PROT_READ_STRIP: 1313 case SCSI_PROT_READ_STRIP:
1314 case SCSI_PROT_WRITE_INSERT: 1314 case SCSI_PROT_WRITE_INSERT:
@@ -1324,17 +1324,18 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1324 1324
1325 case SCSI_PROT_READ_INSERT: 1325 case SCSI_PROT_READ_INSERT:
1326 case SCSI_PROT_WRITE_STRIP: 1326 case SCSI_PROT_WRITE_STRIP:
1327 *txop = BG_OP_IN_CRC_OUT_NODIF;
1328 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1329 break;
1330
1327 case SCSI_PROT_NORMAL: 1331 case SCSI_PROT_NORMAL:
1328 default: 1332 default:
1329 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1333 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1330 "9075 BLKGRD: Bad op/guard:%d/%d combination\n", 1334 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1331 scsi_get_prot_op(sc), guard_type); 1335 scsi_get_prot_op(sc));
1332 ret = 1; 1336 ret = 1;
1333 break; 1337 break;
1334 } 1338 }
1335 } else {
1336 /* unsupported format */
1337 BUG();
1338 } 1339 }
1339 1340
1340 return ret; 1341 return ret;
@@ -1352,45 +1353,6 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
1352 return sc->device->sector_size; 1353 return sc->device->sector_size;
1353} 1354}
1354 1355
1355/**
1356 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
1357 * @sc: in: SCSI command
1358 * @apptagmask: out: app tag mask
1359 * @apptagval: out: app tag value
1360 * @reftag: out: ref tag (reference tag)
1361 *
1362 * Description:
1363 * Extract DIF parameters from the command if possible. Otherwise,
1364 * use default parameters.
1365 *
1366 **/
1367static inline void
1368lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
1369 uint16_t *apptagval, uint32_t *reftag)
1370{
1371 struct scsi_dif_tuple *spt;
1372 unsigned char op = scsi_get_prot_op(sc);
1373 unsigned int protcnt = scsi_prot_sg_count(sc);
1374 static int cnt;
1375
1376 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
1377 op == SCSI_PROT_WRITE_PASS)) {
1378
1379 cnt++;
1380 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
1381 scsi_prot_sglist(sc)[0].offset;
1382 *apptagmask = 0;
1383 *apptagval = 0;
1384 *reftag = cpu_to_be32(spt->ref_tag);
1385
1386 } else {
1387 /* SBC defines ref tag to be lower 32bits of LBA */
1388 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
1389 *apptagmask = 0;
1390 *apptagval = 0;
1391 }
1392}
1393
1394/* 1356/*
1395 * This function sets up buffer list for protection groups of 1357 * This function sets up buffer list for protection groups of
1396 * type LPFC_PG_TYPE_NO_DIF 1358 * type LPFC_PG_TYPE_NO_DIF
@@ -1427,9 +1389,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1427 dma_addr_t physaddr; 1389 dma_addr_t physaddr;
1428 int i = 0, num_bde = 0, status; 1390 int i = 0, num_bde = 0, status;
1429 int datadir = sc->sc_data_direction; 1391 int datadir = sc->sc_data_direction;
1430 unsigned blksize;
1431 uint32_t reftag; 1392 uint32_t reftag;
1432 uint16_t apptagmask, apptagval; 1393 unsigned blksize;
1433 uint8_t txop, rxop; 1394 uint8_t txop, rxop;
1434 1395
1435 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1396 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
@@ -1438,17 +1399,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1438 1399
1439 /* extract some info from the scsi command for pde*/ 1400 /* extract some info from the scsi command for pde*/
1440 blksize = lpfc_cmd_blksize(sc); 1401 blksize = lpfc_cmd_blksize(sc);
1441 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1402 reftag = scsi_get_lba(sc) & 0xffffffff;
1442 1403
1443 /* setup PDE5 with what we have */ 1404 /* setup PDE5 with what we have */
1444 pde5 = (struct lpfc_pde5 *) bpl; 1405 pde5 = (struct lpfc_pde5 *) bpl;
1445 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1406 memset(pde5, 0, sizeof(struct lpfc_pde5));
1446 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1407 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1447 pde5->reftag = reftag;
1448 1408
1449 /* Endianness conversion if necessary for PDE5 */ 1409 /* Endianness conversion if necessary for PDE5 */
1450 pde5->word0 = cpu_to_le32(pde5->word0); 1410 pde5->word0 = cpu_to_le32(pde5->word0);
1451 pde5->reftag = cpu_to_le32(pde5->reftag); 1411 pde5->reftag = cpu_to_le32(reftag);
1452 1412
1453 /* advance bpl and increment bde count */ 1413 /* advance bpl and increment bde count */
1454 num_bde++; 1414 num_bde++;
@@ -1463,10 +1423,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1463 if (datadir == DMA_FROM_DEVICE) { 1423 if (datadir == DMA_FROM_DEVICE) {
1464 bf_set(pde6_ce, pde6, 1); 1424 bf_set(pde6_ce, pde6, 1);
1465 bf_set(pde6_re, pde6, 1); 1425 bf_set(pde6_re, pde6, 1);
1466 bf_set(pde6_ae, pde6, 1);
1467 } 1426 }
1468 bf_set(pde6_ai, pde6, 1); 1427 bf_set(pde6_ai, pde6, 1);
1469 bf_set(pde6_apptagval, pde6, apptagval); 1428 bf_set(pde6_ae, pde6, 0);
1429 bf_set(pde6_apptagval, pde6, 0);
1470 1430
1471 /* Endianness conversion if necessary for PDE6 */ 1431 /* Endianness conversion if necessary for PDE6 */
1472 pde6->word0 = cpu_to_le32(pde6->word0); 1432 pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1551,7 +1511,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1551 unsigned char pgdone = 0, alldone = 0; 1511 unsigned char pgdone = 0, alldone = 0;
1552 unsigned blksize; 1512 unsigned blksize;
1553 uint32_t reftag; 1513 uint32_t reftag;
1554 uint16_t apptagmask, apptagval;
1555 uint8_t txop, rxop; 1514 uint8_t txop, rxop;
1556 int num_bde = 0; 1515 int num_bde = 0;
1557 1516
@@ -1571,7 +1530,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1571 1530
1572 /* extract some info from the scsi command */ 1531 /* extract some info from the scsi command */
1573 blksize = lpfc_cmd_blksize(sc); 1532 blksize = lpfc_cmd_blksize(sc);
1574 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1533 reftag = scsi_get_lba(sc) & 0xffffffff;
1575 1534
1576 split_offset = 0; 1535 split_offset = 0;
1577 do { 1536 do {
@@ -1579,11 +1538,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1579 pde5 = (struct lpfc_pde5 *) bpl; 1538 pde5 = (struct lpfc_pde5 *) bpl;
1580 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1539 memset(pde5, 0, sizeof(struct lpfc_pde5));
1581 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1540 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1582 pde5->reftag = reftag;
1583 1541
1584 /* Endianness conversion if necessary for PDE5 */ 1542 /* Endianness conversion if necessary for PDE5 */
1585 pde5->word0 = cpu_to_le32(pde5->word0); 1543 pde5->word0 = cpu_to_le32(pde5->word0);
1586 pde5->reftag = cpu_to_le32(pde5->reftag); 1544 pde5->reftag = cpu_to_le32(reftag);
1587 1545
1588 /* advance bpl and increment bde count */ 1546 /* advance bpl and increment bde count */
1589 num_bde++; 1547 num_bde++;
@@ -1597,9 +1555,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1597 bf_set(pde6_oprx, pde6, rxop); 1555 bf_set(pde6_oprx, pde6, rxop);
1598 bf_set(pde6_ce, pde6, 1); 1556 bf_set(pde6_ce, pde6, 1);
1599 bf_set(pde6_re, pde6, 1); 1557 bf_set(pde6_re, pde6, 1);
1600 bf_set(pde6_ae, pde6, 1);
1601 bf_set(pde6_ai, pde6, 1); 1558 bf_set(pde6_ai, pde6, 1);
1602 bf_set(pde6_apptagval, pde6, apptagval); 1559 bf_set(pde6_ae, pde6, 0);
1560 bf_set(pde6_apptagval, pde6, 0);
1603 1561
1604 /* Endianness conversion if necessary for PDE6 */ 1562 /* Endianness conversion if necessary for PDE6 */
1605 pde6->word0 = cpu_to_le32(pde6->word0); 1563 pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1621,8 +1579,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1621 memset(pde7, 0, sizeof(struct lpfc_pde7)); 1579 memset(pde7, 0, sizeof(struct lpfc_pde7));
1622 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1580 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1623 1581
1624 pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); 1582 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1625 pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); 1583 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1626 1584
1627 protgrp_blks = protgroup_len / 8; 1585 protgrp_blks = protgroup_len / 8;
1628 protgrp_bytes = protgrp_blks * blksize; 1586 protgrp_bytes = protgrp_blks * blksize;
@@ -1632,7 +1590,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1632 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1590 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1633 protgroup_offset += protgroup_remainder; 1591 protgroup_offset += protgroup_remainder;
1634 protgrp_blks = protgroup_remainder / 8; 1592 protgrp_blks = protgroup_remainder / 8;
1635 protgrp_bytes = protgroup_remainder * blksize; 1593 protgrp_bytes = protgrp_blks * blksize;
1636 } else { 1594 } else {
1637 protgroup_offset = 0; 1595 protgroup_offset = 0;
1638 curr_prot++; 1596 curr_prot++;
@@ -2006,16 +1964,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2006 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 1964 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2007 /* 1965 /*
2008 * setup sense data descriptor 0 per SPC-4 as an information 1966 * setup sense data descriptor 0 per SPC-4 as an information
2009 * field, and put the failing LBA in it 1967 * field, and put the failing LBA in it.
1968 * This code assumes there was also a guard/app/ref tag error
1969 * indication.
2010 */ 1970 */
2011 cmd->sense_buffer[8] = 0; /* Information */ 1971 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
2012 cmd->sense_buffer[9] = 0xa; /* Add. length */ 1972 cmd->sense_buffer[8] = 0; /* Information descriptor type */
1973 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
1974 cmd->sense_buffer[10] = 0x80; /* Validity bit */
2013 bghm /= cmd->device->sector_size; 1975 bghm /= cmd->device->sector_size;
2014 1976
2015 failing_sector = scsi_get_lba(cmd); 1977 failing_sector = scsi_get_lba(cmd);
2016 failing_sector += bghm; 1978 failing_sector += bghm;
2017 1979
2018 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]); 1980 /* Descriptor Information */
1981 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
2019 } 1982 }
2020 1983
2021 if (!ret) { 1984 if (!ret) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 98999bbd8cbf..8b799f047a99 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -560,7 +560,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
561 if (rrq) { 561 if (rrq) {
562 rrq->send_rrq = send_rrq; 562 rrq->send_rrq = send_rrq;
563 rrq->xritag = phba->sli4_hba.xri_ids[xritag]; 563 rrq->xritag = xritag;
564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
565 rrq->ndlp = ndlp; 565 rrq->ndlp = ndlp;
566 rrq->nlp_DID = ndlp->nlp_DID; 566 rrq->nlp_DID = ndlp->nlp_DID;
@@ -2452,7 +2452,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2452 2452
2453 /* search continue save q for same XRI */ 2453 /* search continue save q for same XRI */
2454 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2454 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2455 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { 2455 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2456 saveq->iocb.unsli3.rcvsli3.ox_id) {
2456 list_add_tail(&saveq->list, &iocbq->list); 2457 list_add_tail(&saveq->list, &iocbq->list);
2457 found = 1; 2458 found = 1;
2458 break; 2459 break;
@@ -3355,6 +3356,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3355 irspiocbq); 3356 irspiocbq);
3356 break; 3357 break;
3357 case CQE_CODE_RECEIVE: 3358 case CQE_CODE_RECEIVE:
3359 case CQE_CODE_RECEIVE_V1:
3358 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3360 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3359 cq_event); 3361 cq_event);
3360 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3362 lpfc_sli4_handle_received_buffer(phba, dmabuf);
@@ -4712,10 +4714,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4712 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 4714 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4713 * @phba: Pointer to HBA context object. 4715 * @phba: Pointer to HBA context object.
4714 * @type: The resource extent type. 4716 * @type: The resource extent type.
4717 * @extnt_count: buffer to hold port available extent count.
4718 * @extnt_size: buffer to hold element count per extent.
4715 * 4719 *
4716 * This function allocates all SLI4 resource identifiers. 4720 * This function calls the port and retrievs the number of available
4721 * extents and their size for a particular extent type.
4722 *
4723 * Returns: 0 if successful. Nonzero otherwise.
4717 **/ 4724 **/
4718static int 4725int
4719lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 4726lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4720 uint16_t *extnt_count, uint16_t *extnt_size) 4727 uint16_t *extnt_count, uint16_t *extnt_size)
4721{ 4728{
@@ -4892,7 +4899,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
4892 req_len, *emb); 4899 req_len, *emb);
4893 if (alloc_len < req_len) { 4900 if (alloc_len < req_len) {
4894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4895 "9000 Allocated DMA memory size (x%x) is " 4902 "2982 Allocated DMA memory size (x%x) is "
4896 "less than the requested DMA memory " 4903 "less than the requested DMA memory "
4897 "size (x%x)\n", alloc_len, req_len); 4904 "size (x%x)\n", alloc_len, req_len);
4898 return -ENOMEM; 4905 return -ENOMEM;
@@ -5506,6 +5513,154 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5506} 5513}
5507 5514
5508/** 5515/**
5516 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5517 * @phba: Pointer to HBA context object.
5518 * @type: The resource extent type.
5519 * @extnt_count: buffer to hold port extent count response
5520 * @extnt_size: buffer to hold port extent size response.
5521 *
5522 * This function calls the port to read the host allocated extents
5523 * for a particular type.
5524 **/
5525int
5526lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5527 uint16_t *extnt_cnt, uint16_t *extnt_size)
5528{
5529 bool emb;
5530 int rc = 0;
5531 uint16_t curr_blks = 0;
5532 uint32_t req_len, emb_len;
5533 uint32_t alloc_len, mbox_tmo;
5534 struct list_head *blk_list_head;
5535 struct lpfc_rsrc_blks *rsrc_blk;
5536 LPFC_MBOXQ_t *mbox;
5537 void *virtaddr = NULL;
5538 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5539 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5540 union lpfc_sli4_cfg_shdr *shdr;
5541
5542 switch (type) {
5543 case LPFC_RSC_TYPE_FCOE_VPI:
5544 blk_list_head = &phba->lpfc_vpi_blk_list;
5545 break;
5546 case LPFC_RSC_TYPE_FCOE_XRI:
5547 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5548 break;
5549 case LPFC_RSC_TYPE_FCOE_VFI:
5550 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5551 break;
5552 case LPFC_RSC_TYPE_FCOE_RPI:
5553 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5554 break;
5555 default:
5556 return -EIO;
5557 }
5558
5559 /* Count the number of extents currently allocatd for this type. */
5560 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5561 if (curr_blks == 0) {
5562 /*
5563 * The GET_ALLOCATED mailbox does not return the size,
5564 * just the count. The size should be just the size
5565 * stored in the current allocated block and all sizes
5566 * for an extent type are the same so set the return
5567 * value now.
5568 */
5569 *extnt_size = rsrc_blk->rsrc_size;
5570 }
5571 curr_blks++;
5572 }
5573
5574 /* Calculate the total requested length of the dma memory. */
5575 req_len = curr_blks * sizeof(uint16_t);
5576
5577 /*
5578 * Calculate the size of an embedded mailbox. The uint32_t
5579 * accounts for extents-specific word.
5580 */
5581 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5582 sizeof(uint32_t);
5583
5584 /*
5585 * Presume the allocation and response will fit into an embedded
5586 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5587 */
5588 emb = LPFC_SLI4_MBX_EMBED;
5589 req_len = emb_len;
5590 if (req_len > emb_len) {
5591 req_len = curr_blks * sizeof(uint16_t) +
5592 sizeof(union lpfc_sli4_cfg_shdr) +
5593 sizeof(uint32_t);
5594 emb = LPFC_SLI4_MBX_NEMBED;
5595 }
5596
5597 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5598 if (!mbox)
5599 return -ENOMEM;
5600 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5601
5602 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5603 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5604 req_len, emb);
5605 if (alloc_len < req_len) {
5606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5607 "2983 Allocated DMA memory size (x%x) is "
5608 "less than the requested DMA memory "
5609 "size (x%x)\n", alloc_len, req_len);
5610 rc = -ENOMEM;
5611 goto err_exit;
5612 }
5613 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5614 if (unlikely(rc)) {
5615 rc = -EIO;
5616 goto err_exit;
5617 }
5618
5619 if (!phba->sli4_hba.intr_enable)
5620 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5621 else {
5622 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5623 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5624 }
5625
5626 if (unlikely(rc)) {
5627 rc = -EIO;
5628 goto err_exit;
5629 }
5630
5631 /*
5632 * Figure out where the response is located. Then get local pointers
5633 * to the response data. The port does not guarantee to respond to
5634 * all extents counts request so update the local variable with the
5635 * allocated count from the port.
5636 */
5637 if (emb == LPFC_SLI4_MBX_EMBED) {
5638 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5639 shdr = &rsrc_ext->header.cfg_shdr;
5640 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5641 } else {
5642 virtaddr = mbox->sge_array->addr[0];
5643 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5644 shdr = &n_rsrc->cfg_shdr;
5645 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5646 }
5647
5648 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5649 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5650 "2984 Failed to read allocated resources "
5651 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5652 type,
5653 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5654 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5655 rc = -EIO;
5656 goto err_exit;
5657 }
5658 err_exit:
5659 lpfc_sli4_mbox_cmd_free(phba, mbox);
5660 return rc;
5661}
5662
5663/**
5509 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 5664 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
5510 * @phba: Pointer to HBA context object. 5665 * @phba: Pointer to HBA context object.
5511 * 5666 *
@@ -5837,6 +5992,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5837 "Advanced Error Reporting (AER)\n"); 5992 "Advanced Error Reporting (AER)\n");
5838 phba->cfg_aer_support = 0; 5993 phba->cfg_aer_support = 0;
5839 } 5994 }
5995 rc = 0;
5840 } 5996 }
5841 5997
5842 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5998 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -6634,6 +6790,9 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
6634 unsigned long iflags; 6790 unsigned long iflags;
6635 int rc; 6791 int rc;
6636 6792
6793 /* dump from issue mailbox command if setup */
6794 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
6795
6637 rc = lpfc_mbox_dev_check(phba); 6796 rc = lpfc_mbox_dev_check(phba);
6638 if (unlikely(rc)) { 6797 if (unlikely(rc)) {
6639 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7318,12 +7477,12 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7318 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 7477 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
7319 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7478 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7320 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 7479 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
7321 break; 7480 break;
7322 case CMD_XMIT_SEQUENCE64_CX: 7481 case CMD_XMIT_SEQUENCE64_CX:
7323 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 7482 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
7324 iocbq->iocb.un.ulpWord[3]); 7483 iocbq->iocb.un.ulpWord[3]);
7325 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 7484 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7326 iocbq->iocb.ulpContext); 7485 iocbq->iocb.unsli3.rcvsli3.ox_id);
7327 /* The entire sequence is transmitted for this IOCB */ 7486 /* The entire sequence is transmitted for this IOCB */
7328 xmit_len = total_len; 7487 xmit_len = total_len;
7329 cmnd = CMD_XMIT_SEQUENCE64_CR; 7488 cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -7341,7 +7500,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7341 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 7500 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
7342 wqe->xmit_sequence.xmit_len = xmit_len; 7501 wqe->xmit_sequence.xmit_len = xmit_len;
7343 command_type = OTHER_COMMAND; 7502 command_type = OTHER_COMMAND;
7344 break; 7503 break;
7345 case CMD_XMIT_BCAST64_CN: 7504 case CMD_XMIT_BCAST64_CN:
7346 /* word3 iocb=iotag32 wqe=seq_payload_len */ 7505 /* word3 iocb=iotag32 wqe=seq_payload_len */
7347 wqe->xmit_bcast64.seq_payload_len = xmit_len; 7506 wqe->xmit_bcast64.seq_payload_len = xmit_len;
@@ -7355,7 +7514,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7355 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 7514 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
7356 LPFC_WQE_LENLOC_WORD3); 7515 LPFC_WQE_LENLOC_WORD3);
7357 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 7516 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7358 break; 7517 break;
7359 case CMD_FCP_IWRITE64_CR: 7518 case CMD_FCP_IWRITE64_CR:
7360 command_type = FCP_COMMAND_DATA_OUT; 7519 command_type = FCP_COMMAND_DATA_OUT;
7361 /* word3 iocb=iotag wqe=payload_offset_len */ 7520 /* word3 iocb=iotag wqe=payload_offset_len */
@@ -7375,7 +7534,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7375 LPFC_WQE_LENLOC_WORD4); 7534 LPFC_WQE_LENLOC_WORD4);
7376 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 7535 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
7377 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 7536 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
7378 break; 7537 break;
7379 case CMD_FCP_IREAD64_CR: 7538 case CMD_FCP_IREAD64_CR:
7380 /* word3 iocb=iotag wqe=payload_offset_len */ 7539 /* word3 iocb=iotag wqe=payload_offset_len */
7381 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7540 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
@@ -7394,7 +7553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7394 LPFC_WQE_LENLOC_WORD4); 7553 LPFC_WQE_LENLOC_WORD4);
7395 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 7554 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
7396 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 7555 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
7397 break; 7556 break;
7398 case CMD_FCP_ICMND64_CR: 7557 case CMD_FCP_ICMND64_CR:
7399 /* word3 iocb=IO_TAG wqe=reserved */ 7558 /* word3 iocb=IO_TAG wqe=reserved */
7400 wqe->fcp_icmd.rsrvd3 = 0; 7559 wqe->fcp_icmd.rsrvd3 = 0;
@@ -7407,7 +7566,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7407 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 7566 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
7408 LPFC_WQE_LENLOC_NONE); 7567 LPFC_WQE_LENLOC_NONE);
7409 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 7568 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
7410 break; 7569 break;
7411 case CMD_GEN_REQUEST64_CR: 7570 case CMD_GEN_REQUEST64_CR:
7412 /* For this command calculate the xmit length of the 7571 /* For this command calculate the xmit length of the
7413 * request bde. 7572 * request bde.
@@ -7442,7 +7601,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7442 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7601 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7443 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 7602 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
7444 command_type = OTHER_COMMAND; 7603 command_type = OTHER_COMMAND;
7445 break; 7604 break;
7446 case CMD_XMIT_ELS_RSP64_CX: 7605 case CMD_XMIT_ELS_RSP64_CX:
7447 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7606 ndlp = (struct lpfc_nodelist *)iocbq->context1;
7448 /* words0-2 BDE memcpy */ 7607 /* words0-2 BDE memcpy */
@@ -7457,7 +7616,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7457 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7616 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7458 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 7617 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
7459 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7618 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7460 iocbq->iocb.ulpContext); 7619 iocbq->iocb.unsli3.rcvsli3.ox_id);
7461 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7620 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
7462 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7621 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
7463 phba->vpi_ids[iocbq->vport->vpi]); 7622 phba->vpi_ids[iocbq->vport->vpi]);
@@ -7470,7 +7629,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7470 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 7629 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7471 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7630 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7472 command_type = OTHER_COMMAND; 7631 command_type = OTHER_COMMAND;
7473 break; 7632 break;
7474 case CMD_CLOSE_XRI_CN: 7633 case CMD_CLOSE_XRI_CN:
7475 case CMD_ABORT_XRI_CN: 7634 case CMD_ABORT_XRI_CN:
7476 case CMD_ABORT_XRI_CX: 7635 case CMD_ABORT_XRI_CX:
@@ -7509,7 +7668,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7509 cmnd = CMD_ABORT_XRI_CX; 7668 cmnd = CMD_ABORT_XRI_CX;
7510 command_type = OTHER_COMMAND; 7669 command_type = OTHER_COMMAND;
7511 xritag = 0; 7670 xritag = 0;
7512 break; 7671 break;
7513 case CMD_XMIT_BLS_RSP64_CX: 7672 case CMD_XMIT_BLS_RSP64_CX:
7514 /* As BLS ABTS RSP WQE is very different from other WQEs, 7673 /* As BLS ABTS RSP WQE is very different from other WQEs,
7515 * we re-construct this WQE here based on information in 7674 * we re-construct this WQE here based on information in
@@ -7553,7 +7712,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7553 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 7712 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
7554 } 7713 }
7555 7714
7556 break; 7715 break;
7557 case CMD_XRI_ABORTED_CX: 7716 case CMD_XRI_ABORTED_CX:
7558 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 7717 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
7559 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 7718 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -7565,7 +7724,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7565 "2014 Invalid command 0x%x\n", 7724 "2014 Invalid command 0x%x\n",
7566 iocbq->iocb.ulpCommand); 7725 iocbq->iocb.ulpCommand);
7567 return IOCB_ERROR; 7726 return IOCB_ERROR;
7568 break; 7727 break;
7569 } 7728 }
7570 7729
7571 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 7730 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
@@ -10481,10 +10640,14 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
10481 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 10640 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
10482 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 10641 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
10483 struct hbq_dmabuf *dma_buf; 10642 struct hbq_dmabuf *dma_buf;
10484 uint32_t status; 10643 uint32_t status, rq_id;
10485 unsigned long iflags; 10644 unsigned long iflags;
10486 10645
10487 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) 10646 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
10647 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
10648 else
10649 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
10650 if (rq_id != hrq->queue_id)
10488 goto out; 10651 goto out;
10489 10652
10490 status = bf_get(lpfc_rcqe_status, rcqe); 10653 status = bf_get(lpfc_rcqe_status, rcqe);
@@ -10563,6 +10726,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
10563 (struct sli4_wcqe_xri_aborted *)&cqevt); 10726 (struct sli4_wcqe_xri_aborted *)&cqevt);
10564 break; 10727 break;
10565 case CQE_CODE_RECEIVE: 10728 case CQE_CODE_RECEIVE:
10729 case CQE_CODE_RECEIVE_V1:
10566 /* Process the RQ event */ 10730 /* Process the RQ event */
10567 phba->last_completion_time = jiffies; 10731 phba->last_completion_time = jiffies;
10568 workposted = lpfc_sli4_sp_handle_rcqe(phba, 10732 workposted = lpfc_sli4_sp_handle_rcqe(phba,
@@ -12345,19 +12509,18 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
12345} 12509}
12346 12510
12347/** 12511/**
12348 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 12512 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
12349 * @phba: pointer to lpfc hba data structure. 12513 * @phba: pointer to lpfc hba data structure.
12350 * 12514 *
12351 * This routine is invoked to post rpi header templates to the 12515 * This routine is invoked to post rpi header templates to the
12352 * port for those SLI4 ports that do not support extents. This routine 12516 * HBA consistent with the SLI-4 interface spec. This routine
12353 * posts a PAGE_SIZE memory region to the port to hold up to 12517 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
12354 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 12518 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
12355 * and should be called only when interrupts are disabled.
12356 * 12519 *
12357 * Return codes 12520 * Returns
12358 * 0 - successful 12521 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
12359 * -ERROR - otherwise. 12522 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
12360 */ 12523 **/
12361uint16_t 12524uint16_t
12362lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 12525lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
12363{ 12526{
@@ -13406,7 +13569,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
13406 * This function validates the xri maps to the known range of XRIs allocated an 13569 * This function validates the xri maps to the known range of XRIs allocated an
13407 * used by the driver. 13570 * used by the driver.
13408 **/ 13571 **/
13409static uint16_t 13572uint16_t
13410lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 13573lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
13411 uint16_t xri) 13574 uint16_t xri)
13412{ 13575{
@@ -13643,10 +13806,12 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
13643static struct lpfc_iocbq * 13806static struct lpfc_iocbq *
13644lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 13807lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13645{ 13808{
13809 struct hbq_dmabuf *hbq_buf;
13646 struct lpfc_dmabuf *d_buf, *n_buf; 13810 struct lpfc_dmabuf *d_buf, *n_buf;
13647 struct lpfc_iocbq *first_iocbq, *iocbq; 13811 struct lpfc_iocbq *first_iocbq, *iocbq;
13648 struct fc_frame_header *fc_hdr; 13812 struct fc_frame_header *fc_hdr;
13649 uint32_t sid; 13813 uint32_t sid;
13814 uint32_t len, tot_len;
13650 struct ulp_bde64 *pbde; 13815 struct ulp_bde64 *pbde;
13651 13816
13652 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 13817 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
@@ -13655,6 +13820,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13655 lpfc_update_rcv_time_stamp(vport); 13820 lpfc_update_rcv_time_stamp(vport);
13656 /* get the Remote Port's SID */ 13821 /* get the Remote Port's SID */
13657 sid = sli4_sid_from_fc_hdr(fc_hdr); 13822 sid = sli4_sid_from_fc_hdr(fc_hdr);
13823 tot_len = 0;
13658 /* Get an iocbq struct to fill in. */ 13824 /* Get an iocbq struct to fill in. */
13659 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 13825 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
13660 if (first_iocbq) { 13826 if (first_iocbq) {
@@ -13662,9 +13828,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13662 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 13828 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
13663 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 13829 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
13664 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 13830 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
13665 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 13831 first_iocbq->iocb.ulpContext = NO_XRI;
13666 /* iocbq is prepped for internal consumption. Logical vpi. */ 13832 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
13667 first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi; 13833 be16_to_cpu(fc_hdr->fh_ox_id);
13834 /* iocbq is prepped for internal consumption. Physical vpi. */
13835 first_iocbq->iocb.unsli3.rcvsli3.vpi =
13836 vport->phba->vpi_ids[vport->vpi];
13668 /* put the first buffer into the first IOCBq */ 13837 /* put the first buffer into the first IOCBq */
13669 first_iocbq->context2 = &seq_dmabuf->dbuf; 13838 first_iocbq->context2 = &seq_dmabuf->dbuf;
13670 first_iocbq->context3 = NULL; 13839 first_iocbq->context3 = NULL;
@@ -13672,9 +13841,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13672 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 13841 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
13673 LPFC_DATA_BUF_SIZE; 13842 LPFC_DATA_BUF_SIZE;
13674 first_iocbq->iocb.un.rcvels.remoteID = sid; 13843 first_iocbq->iocb.un.rcvels.remoteID = sid;
13675 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 13844 tot_len = bf_get(lpfc_rcqe_length,
13676 bf_get(lpfc_rcqe_length,
13677 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13845 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
13846 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
13678 } 13847 }
13679 iocbq = first_iocbq; 13848 iocbq = first_iocbq;
13680 /* 13849 /*
@@ -13692,9 +13861,13 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13692 pbde = (struct ulp_bde64 *) 13861 pbde = (struct ulp_bde64 *)
13693 &iocbq->iocb.unsli3.sli3Words[4]; 13862 &iocbq->iocb.unsli3.sli3Words[4];
13694 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 13863 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
13695 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 13864
13696 bf_get(lpfc_rcqe_length, 13865 /* We need to get the size out of the right CQE */
13697 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13866 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13867 len = bf_get(lpfc_rcqe_length,
13868 &hbq_buf->cq_event.cqe.rcqe_cmpl);
13869 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
13870 tot_len += len;
13698 } else { 13871 } else {
13699 iocbq = lpfc_sli_get_iocbq(vport->phba); 13872 iocbq = lpfc_sli_get_iocbq(vport->phba);
13700 if (!iocbq) { 13873 if (!iocbq) {
@@ -13712,9 +13885,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13712 iocbq->iocb.ulpBdeCount = 1; 13885 iocbq->iocb.ulpBdeCount = 1;
13713 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 13886 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
13714 LPFC_DATA_BUF_SIZE; 13887 LPFC_DATA_BUF_SIZE;
13715 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 13888
13716 bf_get(lpfc_rcqe_length, 13889 /* We need to get the size out of the right CQE */
13717 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13890 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13891 len = bf_get(lpfc_rcqe_length,
13892 &hbq_buf->cq_event.cqe.rcqe_cmpl);
13893 tot_len += len;
13894 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
13895
13718 iocbq->iocb.un.rcvels.remoteID = sid; 13896 iocbq->iocb.un.rcvels.remoteID = sid;
13719 list_add_tail(&iocbq->list, &first_iocbq->list); 13897 list_add_tail(&iocbq->list, &first_iocbq->list);
13720 } 13898 }
@@ -13787,7 +13965,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
13787 lpfc_in_buf_free(phba, &dmabuf->dbuf); 13965 lpfc_in_buf_free(phba, &dmabuf->dbuf);
13788 return; 13966 return;
13789 } 13967 }
13790 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); 13968 if ((bf_get(lpfc_cqe_code,
13969 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
13970 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
13971 &dmabuf->cq_event.cqe.rcqe_cmpl);
13972 else
13973 fcfi = bf_get(lpfc_rcqe_fcf_id,
13974 &dmabuf->cq_event.cqe.rcqe_cmpl);
13791 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 13975 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
13792 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 13976 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
13793 /* throw out the frame */ 13977 /* throw out the frame */
@@ -14451,6 +14635,92 @@ fail_fcf_read:
14451} 14635}
14452 14636
14453/** 14637/**
14638 * lpfc_check_next_fcf_pri
14639 * phba pointer to the lpfc_hba struct for this port.
14640 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
14641 * routine when the rr_bmask is empty. The FCF indecies are put into the
14642 * rr_bmask based on their priority level. Starting from the highest priority
14643 * to the lowest. The most likely FCF candidate will be in the highest
14644 * priority group. When this routine is called it searches the fcf_pri list for
14645 * next lowest priority group and repopulates the rr_bmask with only those
14646 * fcf_indexes.
14647 * returns:
14648 * 1=success 0=failure
14649 **/
14650int
14651lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
14652{
14653 uint16_t next_fcf_pri;
14654 uint16_t last_index;
14655 struct lpfc_fcf_pri *fcf_pri;
14656 int rc;
14657 int ret = 0;
14658
14659 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
14660 LPFC_SLI4_FCF_TBL_INDX_MAX);
14661 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
14662 "3060 Last IDX %d\n", last_index);
14663 if (list_empty(&phba->fcf.fcf_pri_list)) {
14664 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
14665 "3061 Last IDX %d\n", last_index);
14666 return 0; /* Empty rr list */
14667 }
14668 next_fcf_pri = 0;
14669 /*
14670 * Clear the rr_bmask and set all of the bits that are at this
14671 * priority.
14672 */
14673 memset(phba->fcf.fcf_rr_bmask, 0,
14674 sizeof(*phba->fcf.fcf_rr_bmask));
14675 spin_lock_irq(&phba->hbalock);
14676 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14677 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
14678 continue;
14679 /*
14680 * the 1st priority that has not FLOGI failed
14681 * will be the highest.
14682 */
14683 if (!next_fcf_pri)
14684 next_fcf_pri = fcf_pri->fcf_rec.priority;
14685 spin_unlock_irq(&phba->hbalock);
14686 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14687 rc = lpfc_sli4_fcf_rr_index_set(phba,
14688 fcf_pri->fcf_rec.fcf_index);
14689 if (rc)
14690 return 0;
14691 }
14692 spin_lock_irq(&phba->hbalock);
14693 }
14694 /*
14695 * if next_fcf_pri was not set above and the list is not empty then
14696 * we have failed flogis on all of them. So reset flogi failed
14697 * and start at the begining.
14698 */
14699 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
14700 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14701 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
14702 /*
14703 * the 1st priority that has not FLOGI failed
14704 * will be the highest.
14705 */
14706 if (!next_fcf_pri)
14707 next_fcf_pri = fcf_pri->fcf_rec.priority;
14708 spin_unlock_irq(&phba->hbalock);
14709 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14710 rc = lpfc_sli4_fcf_rr_index_set(phba,
14711 fcf_pri->fcf_rec.fcf_index);
14712 if (rc)
14713 return 0;
14714 }
14715 spin_lock_irq(&phba->hbalock);
14716 }
14717 } else
14718 ret = 1;
14719 spin_unlock_irq(&phba->hbalock);
14720
14721 return ret;
14722}
14723/**
14454 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 14724 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
14455 * @phba: pointer to lpfc hba data structure. 14725 * @phba: pointer to lpfc hba data structure.
14456 * 14726 *
@@ -14466,6 +14736,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
14466 uint16_t next_fcf_index; 14736 uint16_t next_fcf_index;
14467 14737
14468 /* Search start from next bit of currently registered FCF index */ 14738 /* Search start from next bit of currently registered FCF index */
14739next_priority:
14469 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 14740 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
14470 LPFC_SLI4_FCF_TBL_INDX_MAX; 14741 LPFC_SLI4_FCF_TBL_INDX_MAX;
14471 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 14742 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
@@ -14473,17 +14744,46 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
14473 next_fcf_index); 14744 next_fcf_index);
14474 14745
14475 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 14746 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
14476 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 14747 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
14748 /*
14749 * If we have wrapped then we need to clear the bits that
14750 * have been tested so that we can detect when we should
14751 * change the priority level.
14752 */
14477 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 14753 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
14478 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 14754 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
14755 }
14756
14479 14757
14480 /* Check roundrobin failover list empty condition */ 14758 /* Check roundrobin failover list empty condition */
14481 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14759 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
14760 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
14761 /*
14762 * If next fcf index is not found check if there are lower
14763 * Priority level fcf's in the fcf_priority list.
14764 * Set up the rr_bmask with all of the avaiable fcf bits
14765 * at that level and continue the selection process.
14766 */
14767 if (lpfc_check_next_fcf_pri_level(phba))
14768 goto next_priority;
14482 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 14769 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
14483 "2844 No roundrobin failover FCF available\n"); 14770 "2844 No roundrobin failover FCF available\n");
14484 return LPFC_FCOE_FCF_NEXT_NONE; 14771 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
14772 return LPFC_FCOE_FCF_NEXT_NONE;
14773 else {
14774 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
14775 "3063 Only FCF available idx %d, flag %x\n",
14776 next_fcf_index,
14777 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
14778 return next_fcf_index;
14779 }
14485 } 14780 }
14486 14781
14782 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
14783 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
14784 LPFC_FCF_FLOGI_FAILED)
14785 goto next_priority;
14786
14487 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14787 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
14488 "2845 Get next roundrobin failover FCF (x%x)\n", 14788 "2845 Get next roundrobin failover FCF (x%x)\n",
14489 next_fcf_index); 14789 next_fcf_index);
@@ -14535,6 +14835,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
14535void 14835void
14536lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 14836lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
14537{ 14837{
14838 struct lpfc_fcf_pri *fcf_pri;
14538 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14839 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
14539 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 14840 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
14540 "2762 FCF (x%x) reached driver's book " 14841 "2762 FCF (x%x) reached driver's book "
@@ -14543,6 +14844,14 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
14543 return; 14844 return;
14544 } 14845 }
14545 /* Clear the eligible FCF record index bmask */ 14846 /* Clear the eligible FCF record index bmask */
14847 spin_lock_irq(&phba->hbalock);
14848 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14849 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
14850 list_del_init(&fcf_pri->list);
14851 break;
14852 }
14853 }
14854 spin_unlock_irq(&phba->hbalock);
14546 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 14855 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
14547 14856
14548 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14857 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 4b1703554a26..19bb87ae8597 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -81,6 +81,8 @@
81 (fc_hdr)->fh_f_ctl[1] << 8 | \ 81 (fc_hdr)->fh_f_ctl[1] << 8 | \
82 (fc_hdr)->fh_f_ctl[2]) 82 (fc_hdr)->fh_f_ctl[2])
83 83
84#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
85
84enum lpfc_sli4_queue_type { 86enum lpfc_sli4_queue_type {
85 LPFC_EQ, 87 LPFC_EQ,
86 LPFC_GCQ, 88 LPFC_GCQ,
@@ -157,6 +159,25 @@ struct lpfc_fcf_rec {
157#define RECORD_VALID 0x02 159#define RECORD_VALID 0x02
158}; 160};
159 161
162struct lpfc_fcf_pri_rec {
163 uint16_t fcf_index;
164#define LPFC_FCF_ON_PRI_LIST 0x0001
165#define LPFC_FCF_FLOGI_FAILED 0x0002
166 uint16_t flag;
167 uint32_t priority;
168};
169
170struct lpfc_fcf_pri {
171 struct list_head list;
172 struct lpfc_fcf_pri_rec fcf_rec;
173};
174
175/*
176 * Maximum FCF table index, it is for driver internal book keeping, it
177 * just needs to be no less than the supported HBA's FCF table size.
178 */
179#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
180
160struct lpfc_fcf { 181struct lpfc_fcf {
161 uint16_t fcfi; 182 uint16_t fcfi;
162 uint32_t fcf_flag; 183 uint32_t fcf_flag;
@@ -176,15 +197,13 @@ struct lpfc_fcf {
176 uint32_t eligible_fcf_cnt; 197 uint32_t eligible_fcf_cnt;
177 struct lpfc_fcf_rec current_rec; 198 struct lpfc_fcf_rec current_rec;
178 struct lpfc_fcf_rec failover_rec; 199 struct lpfc_fcf_rec failover_rec;
200 struct list_head fcf_pri_list;
201 struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
202 uint32_t current_fcf_scan_pri;
179 struct timer_list redisc_wait; 203 struct timer_list redisc_wait;
180 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */ 204 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
181}; 205};
182 206
183/*
184 * Maximum FCF table index, it is for driver internal book keeping, it
185 * just needs to be no less than the supported HBA's FCF table size.
186 */
187#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
188 207
189#define LPFC_REGION23_SIGNATURE "RG23" 208#define LPFC_REGION23_SIGNATURE "RG23"
190#define LPFC_REGION23_VERSION 1 209#define LPFC_REGION23_VERSION 1
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c03921b1232c..c1e0ae94d9f4 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.23" 21#define LPFC_DRIVER_VERSION "8.3.25"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 7370c084b178..3948a00d81f4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.05.38-rc1" 36#define MEGASAS_VERSION "00.00.05.40-rc1"
37#define MEGASAS_RELDATE "May. 11, 2011" 37#define MEGASAS_RELDATE "Jul. 26, 2011"
38#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011" 38#define MEGASAS_EXT_VERSION "Tue. Jul. 26 17:00:00 PDT 2011"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2d8cdce7b2f5..776d01988660 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.05.38-rc1 21 * Version : v00.00.05.40-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -54,6 +54,7 @@
54#include <scsi/scsi_cmnd.h> 54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h> 55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h> 56#include <scsi/scsi_host.h>
57#include <scsi/scsi_tcq.h>
57#include "megaraid_sas_fusion.h" 58#include "megaraid_sas_fusion.h"
58#include "megaraid_sas.h" 59#include "megaraid_sas.h"
59 60
@@ -2057,6 +2058,20 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2057 } 2058 }
2058} 2059}
2059 2060
2061static int megasas_change_queue_depth(struct scsi_device *sdev,
2062 int queue_depth, int reason)
2063{
2064 if (reason != SCSI_QDEPTH_DEFAULT)
2065 return -EOPNOTSUPP;
2066
2067 if (queue_depth > sdev->host->can_queue)
2068 queue_depth = sdev->host->can_queue;
2069 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
2070 queue_depth);
2071
2072 return queue_depth;
2073}
2074
2060/* 2075/*
2061 * Scsi host template for megaraid_sas driver 2076 * Scsi host template for megaraid_sas driver
2062 */ 2077 */
@@ -2074,6 +2089,7 @@ static struct scsi_host_template megasas_template = {
2074 .eh_timed_out = megasas_reset_timer, 2089 .eh_timed_out = megasas_reset_timer,
2075 .bios_param = megasas_bios_param, 2090 .bios_param = megasas_bios_param,
2076 .use_clustering = ENABLE_CLUSTERING, 2091 .use_clustering = ENABLE_CLUSTERING,
2092 .change_queue_depth = megasas_change_queue_depth,
2077}; 2093};
2078 2094
2079/** 2095/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 8fe3a45794fc..5a5af1fe7581 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -288,7 +288,6 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
288 /* Get dev handle from Pd */ 288 /* Get dev handle from Pd */
289 *pDevHandle = MR_PdDevHandleGet(pd, map); 289 *pDevHandle = MR_PdDevHandleGet(pd, map);
290 } 290 }
291 retval = FALSE;
292 } 291 }
293 292
294 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; 293 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 939f283d0c28..6abd2fcc43e2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -4258,6 +4258,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4258 u32 log_info; 4258 u32 log_info;
4259 struct MPT2SAS_DEVICE *sas_device_priv_data; 4259 struct MPT2SAS_DEVICE *sas_device_priv_data;
4260 u32 response_code = 0; 4260 u32 response_code = 0;
4261 unsigned long flags;
4261 4262
4262 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 4263 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
4263 scmd = _scsih_scsi_lookup_get_clear(ioc, smid); 4264 scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4282,6 +4283,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4282 * the failed direct I/O should be redirected to volume 4283 * the failed direct I/O should be redirected to volume
4283 */ 4284 */
4284 if (_scsih_scsi_direct_io_get(ioc, smid)) { 4285 if (_scsih_scsi_direct_io_get(ioc, smid)) {
4286 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4287 ioc->scsi_lookup[smid - 1].scmd = scmd;
4288 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4285 _scsih_scsi_direct_io_set(ioc, smid, 0); 4289 _scsih_scsi_direct_io_set(ioc, smid, 0);
4286 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 4290 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4287 mpi_request->DevHandle = 4291 mpi_request->DevHandle =
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
index c82b012aba37..78f7e20a0c1c 100644
--- a/drivers/scsi/mvsas/Kconfig
+++ b/drivers/scsi/mvsas/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4# Copyright 2007 Red Hat, Inc. 4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com> 5# Copyright 2008 Marvell. <kewei@marvell.com>
6# Copyright 2009-20011 Marvell. <yuxiangl@marvell.com> 6# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
7# 7#
8# This file is licensed under GPLv2. 8# This file is licensed under GPLv2.
9# 9#
@@ -41,3 +41,10 @@ config SCSI_MVSAS_DEBUG
41 help 41 help
42 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode, 42 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
43 the driver prints some messages to the console. 43 the driver prints some messages to the console.
44config SCSI_MVSAS_TASKLET
45 bool "Support for interrupt tasklet"
46 default n
47 depends on SCSI_MVSAS
48 help
49 Compiles the 88SE64xx/88SE94xx driver in interrupt tasklet mode.In this mode,
50 the interrupt will schedule a tasklet.
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 13c960481391..8ba47229049f 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -33,7 +33,6 @@ static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
33 u32 reg; 33 u32 reg;
34 struct mvs_phy *phy = &mvi->phy[i]; 34 struct mvs_phy *phy = &mvi->phy[i];
35 35
36 /* TODO check & save device type */
37 reg = mr32(MVS_GBL_PORT_TYPE); 36 reg = mr32(MVS_GBL_PORT_TYPE);
38 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 37 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
39 if (reg & MODE_SAS_SATA & (1 << i)) 38 if (reg & MODE_SAS_SATA & (1 << i))
@@ -48,7 +47,7 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
48 u32 tmp; 47 u32 tmp;
49 48
50 tmp = mr32(MVS_PCS); 49 tmp = mr32(MVS_PCS);
51 if (mvi->chip->n_phy <= 4) 50 if (mvi->chip->n_phy <= MVS_SOC_PORTS)
52 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); 51 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
53 else 52 else
54 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); 53 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
@@ -58,24 +57,16 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
58static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi) 57static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
59{ 58{
60 void __iomem *regs = mvi->regs; 59 void __iomem *regs = mvi->regs;
60 int i;
61 61
62 mvs_phy_hacks(mvi); 62 mvs_phy_hacks(mvi);
63 63
64 if (!(mvi->flags & MVF_FLAG_SOC)) { 64 if (!(mvi->flags & MVF_FLAG_SOC)) {
65 /* TEST - for phy decoding error, adjust voltage levels */ 65 for (i = 0; i < MVS_SOC_PORTS; i++) {
66 mw32(MVS_P0_VSR_ADDR + 0, 0x8); 66 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
67 mw32(MVS_P0_VSR_DATA + 0, 0x2F0); 67 mvs_write_port_vsr_data(mvi, i, 0x2F0);
68 68 }
69 mw32(MVS_P0_VSR_ADDR + 8, 0x8);
70 mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
71
72 mw32(MVS_P0_VSR_ADDR + 16, 0x8);
73 mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
74
75 mw32(MVS_P0_VSR_ADDR + 24, 0x8);
76 mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
77 } else { 69 } else {
78 int i;
79 /* disable auto port detection */ 70 /* disable auto port detection */
80 mw32(MVS_GBL_PORT_TYPE, 0); 71 mw32(MVS_GBL_PORT_TYPE, 0);
81 for (i = 0; i < mvi->chip->n_phy; i++) { 72 for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -95,7 +86,7 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
95 u32 reg, tmp; 86 u32 reg, tmp;
96 87
97 if (!(mvi->flags & MVF_FLAG_SOC)) { 88 if (!(mvi->flags & MVF_FLAG_SOC)) {
98 if (phy_id < 4) 89 if (phy_id < MVS_SOC_PORTS)
99 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg); 90 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
100 else 91 else
101 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg); 92 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
@@ -104,13 +95,13 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
104 reg = mr32(MVS_PHY_CTL); 95 reg = mr32(MVS_PHY_CTL);
105 96
106 tmp = reg; 97 tmp = reg;
107 if (phy_id < 4) 98 if (phy_id < MVS_SOC_PORTS)
108 tmp |= (1U << phy_id) << PCTL_LINK_OFFS; 99 tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
109 else 100 else
110 tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS; 101 tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;
111 102
112 if (!(mvi->flags & MVF_FLAG_SOC)) { 103 if (!(mvi->flags & MVF_FLAG_SOC)) {
113 if (phy_id < 4) { 104 if (phy_id < MVS_SOC_PORTS) {
114 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); 105 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
115 mdelay(10); 106 mdelay(10);
116 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); 107 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
@@ -133,9 +124,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
133 tmp &= ~PHYEV_RDY_CH; 124 tmp &= ~PHYEV_RDY_CH;
134 mvs_write_port_irq_stat(mvi, phy_id, tmp); 125 mvs_write_port_irq_stat(mvi, phy_id, tmp);
135 tmp = mvs_read_phy_ctl(mvi, phy_id); 126 tmp = mvs_read_phy_ctl(mvi, phy_id);
136 if (hard == 1) 127 if (hard == MVS_HARD_RESET)
137 tmp |= PHY_RST_HARD; 128 tmp |= PHY_RST_HARD;
138 else if (hard == 0) 129 else if (hard == MVS_SOFT_RESET)
139 tmp |= PHY_RST; 130 tmp |= PHY_RST;
140 mvs_write_phy_ctl(mvi, phy_id, tmp); 131 mvs_write_phy_ctl(mvi, phy_id, tmp);
141 if (hard) { 132 if (hard) {
@@ -321,6 +312,11 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
321 /* init phys */ 312 /* init phys */
322 mvs_64xx_phy_hacks(mvi); 313 mvs_64xx_phy_hacks(mvi);
323 314
315 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
316 tmp &= 0x0000ffff;
317 tmp |= 0x00fa0000;
318 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
319
324 /* enable auto port detection */ 320 /* enable auto port detection */
325 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); 321 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
326 322
@@ -346,7 +342,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
346 342
347 mvs_64xx_enable_xmt(mvi, i); 343 mvs_64xx_enable_xmt(mvi, i);
348 344
349 mvs_64xx_phy_reset(mvi, i, 1); 345 mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
350 msleep(500); 346 msleep(500);
351 mvs_64xx_detect_porttype(mvi, i); 347 mvs_64xx_detect_porttype(mvi, i);
352 } 348 }
@@ -377,13 +373,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
377 mvs_update_phyinfo(mvi, i, 1); 373 mvs_update_phyinfo(mvi, i, 1);
378 } 374 }
379 375
380 /* FIXME: update wide port bitmaps */
381
382 /* little endian for open address and command table, etc. */ 376 /* little endian for open address and command table, etc. */
383 /*
384 * it seems that ( from the spec ) turning on big-endian won't
385 * do us any good on big-endian machines, need further confirmation
386 */
387 cctl = mr32(MVS_CTL); 377 cctl = mr32(MVS_CTL);
388 cctl |= CCTL_ENDIAN_CMD; 378 cctl |= CCTL_ENDIAN_CMD;
389 cctl |= CCTL_ENDIAN_DATA; 379 cctl |= CCTL_ENDIAN_DATA;
@@ -394,15 +384,19 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
394 /* reset CMD queue */ 384 /* reset CMD queue */
395 tmp = mr32(MVS_PCS); 385 tmp = mr32(MVS_PCS);
396 tmp |= PCS_CMD_RST; 386 tmp |= PCS_CMD_RST;
387 tmp &= ~PCS_SELF_CLEAR;
397 mw32(MVS_PCS, tmp); 388 mw32(MVS_PCS, tmp);
398 /* interrupt coalescing may cause missing HW interrput in some case, 389 /*
399 * and the max count is 0x1ff, while our max slot is 0x200, 390 * the max count is 0x1ff, while our max slot is 0x200,
400 * it will make count 0. 391 * it will make count 0.
401 */ 392 */
402 tmp = 0; 393 tmp = 0;
403 mw32(MVS_INT_COAL, tmp); 394 if (MVS_CHIP_SLOT_SZ > 0x1ff)
395 mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
396 else
397 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
404 398
405 tmp = 0x100; 399 tmp = 0x10000 | interrupt_coalescing;
406 mw32(MVS_INT_COAL_TMOUT, tmp); 400 mw32(MVS_INT_COAL_TMOUT, tmp);
407 401
408 /* ladies and gentlemen, start your engines */ 402 /* ladies and gentlemen, start your engines */
@@ -477,13 +471,11 @@ static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
477 471
478 /* clear CMD_CMPLT ASAP */ 472 /* clear CMD_CMPLT ASAP */
479 mw32_f(MVS_INT_STAT, CINT_DONE); 473 mw32_f(MVS_INT_STAT, CINT_DONE);
480#ifndef MVS_USE_TASKLET 474
481 spin_lock(&mvi->lock); 475 spin_lock(&mvi->lock);
482#endif
483 mvs_int_full(mvi); 476 mvs_int_full(mvi);
484#ifndef MVS_USE_TASKLET
485 spin_unlock(&mvi->lock); 477 spin_unlock(&mvi->lock);
486#endif 478
487 return IRQ_HANDLED; 479 return IRQ_HANDLED;
488} 480}
489 481
@@ -630,7 +622,6 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
630{ 622{
631 u32 tmp; 623 u32 tmp;
632 struct mvs_phy *phy = &mvi->phy[i]; 624 struct mvs_phy *phy = &mvi->phy[i];
633 /* workaround for HW phy decoding error on 1.5g disk drive */
634 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); 625 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
635 tmp = mvs_read_port_vsr_data(mvi, i); 626 tmp = mvs_read_port_vsr_data(mvi, i);
636 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> 627 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
@@ -661,7 +652,7 @@ void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
661 tmp |= lrmax; 652 tmp |= lrmax;
662 } 653 }
663 mvs_write_phy_ctl(mvi, phy_id, tmp); 654 mvs_write_phy_ctl(mvi, phy_id, tmp);
664 mvs_64xx_phy_reset(mvi, phy_id, 1); 655 mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
665} 656}
666 657
667static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) 658static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
@@ -744,11 +735,13 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
744 return -1; 735 return -1;
745} 736}
746 737
747#ifndef DISABLE_HOTPLUG_DMA_FIX 738void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
748void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) 739 int buf_len, int from, void *prd)
749{ 740{
750 int i; 741 int i;
751 struct mvs_prd *buf_prd = prd; 742 struct mvs_prd *buf_prd = prd;
743 dma_addr_t buf_dma = mvi->bulk_buffer_dma;
744
752 buf_prd += from; 745 buf_prd += from;
753 for (i = 0; i < MAX_SG_ENTRY - from; i++) { 746 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
754 buf_prd->addr = cpu_to_le64(buf_dma); 747 buf_prd->addr = cpu_to_le64(buf_dma);
@@ -756,7 +749,28 @@ void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
756 ++buf_prd; 749 ++buf_prd;
757 } 750 }
758} 751}
759#endif 752
753static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
754{
755 void __iomem *regs = mvi->regs;
756 u32 tmp = 0;
757 /*
758 * the max count is 0x1ff, while our max slot is 0x200,
759 * it will make count 0.
760 */
761 if (time == 0) {
762 mw32(MVS_INT_COAL, 0);
763 mw32(MVS_INT_COAL_TMOUT, 0x10000);
764 } else {
765 if (MVS_CHIP_SLOT_SZ > 0x1ff)
766 mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
767 else
768 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
769
770 tmp = 0x10000 | time;
771 mw32(MVS_INT_COAL_TMOUT, tmp);
772 }
773}
760 774
761const struct mvs_dispatch mvs_64xx_dispatch = { 775const struct mvs_dispatch mvs_64xx_dispatch = {
762 "mv64xx", 776 "mv64xx",
@@ -780,7 +794,6 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
780 mvs_write_port_irq_stat, 794 mvs_write_port_irq_stat,
781 mvs_read_port_irq_mask, 795 mvs_read_port_irq_mask,
782 mvs_write_port_irq_mask, 796 mvs_write_port_irq_mask,
783 mvs_get_sas_addr,
784 mvs_64xx_command_active, 797 mvs_64xx_command_active,
785 mvs_64xx_clear_srs_irq, 798 mvs_64xx_clear_srs_irq,
786 mvs_64xx_issue_stop, 799 mvs_64xx_issue_stop,
@@ -808,8 +821,8 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
808 mvs_64xx_spi_buildcmd, 821 mvs_64xx_spi_buildcmd,
809 mvs_64xx_spi_issuecmd, 822 mvs_64xx_spi_issuecmd,
810 mvs_64xx_spi_waitdataready, 823 mvs_64xx_spi_waitdataready,
811#ifndef DISABLE_HOTPLUG_DMA_FIX
812 mvs_64xx_fix_dma, 824 mvs_64xx_fix_dma,
813#endif 825 mvs_64xx_tune_interrupt,
826 NULL,
814}; 827};
815 828
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 78162c3c36e6..3501291618fd 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -48,6 +48,216 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
48 } 48 }
49} 49}
50 50
51void set_phy_tuning(struct mvs_info *mvi, int phy_id,
52 struct phy_tuning phy_tuning)
53{
54 u32 tmp, setting_0 = 0, setting_1 = 0;
55 u8 i;
56
57 /* Remap information for B0 chip:
58 *
59 * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
60 * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
61 * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1)
62 * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
63 * R10h -> R120h[15:0] (Generation 2 Setting 1)
64 * R11h -> R120h[31:16] (Generation 3 Setting 0)
65 * R12h -> R124h[15:0] (Generation 3 Setting 1)
66 * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
67 */
68
69 /* A0 has a different set of registers */
70 if (mvi->pdev->revision == VANIR_A0_REV)
71 return;
72
73 for (i = 0; i < 3; i++) {
74 /* loop 3 times, set Gen 1, Gen 2, Gen 3 */
75 switch (i) {
76 case 0:
77 setting_0 = GENERATION_1_SETTING;
78 setting_1 = GENERATION_1_2_SETTING;
79 break;
80 case 1:
81 setting_0 = GENERATION_1_2_SETTING;
82 setting_1 = GENERATION_2_3_SETTING;
83 break;
84 case 2:
85 setting_0 = GENERATION_2_3_SETTING;
86 setting_1 = GENERATION_3_4_SETTING;
87 break;
88 }
89
90 /* Set:
91 *
92 * Transmitter Emphasis Enable
93 * Transmitter Emphasis Amplitude
94 * Transmitter Amplitude
95 */
96 mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
97 tmp = mvs_read_port_vsr_data(mvi, phy_id);
98 tmp &= ~(0xFBE << 16);
99 tmp |= (((phy_tuning.trans_emp_en << 11) |
100 (phy_tuning.trans_emp_amp << 7) |
101 (phy_tuning.trans_amp << 1)) << 16);
102 mvs_write_port_vsr_data(mvi, phy_id, tmp);
103
104 /* Set Transmitter Amplitude Adjust */
105 mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
106 tmp = mvs_read_port_vsr_data(mvi, phy_id);
107 tmp &= ~(0xC000);
108 tmp |= (phy_tuning.trans_amp_adj << 14);
109 mvs_write_port_vsr_data(mvi, phy_id, tmp);
110 }
111}
112
113void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
114 struct ffe_control ffe)
115{
116 u32 tmp;
117
118 /* Don't run this if A0/B0 */
119 if ((mvi->pdev->revision == VANIR_A0_REV)
120 || (mvi->pdev->revision == VANIR_B0_REV))
121 return;
122
123 /* FFE Resistor and Capacitor */
124 /* R10Ch DFE Resolution Control/Squelch and FFE Setting
125 *
126 * FFE_FORCE [7]
127 * FFE_RES_SEL [6:4]
128 * FFE_CAP_SEL [3:0]
129 */
130 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
131 tmp = mvs_read_port_vsr_data(mvi, phy_id);
132 tmp &= ~0xFF;
133
134 /* Read from HBA_Info_Page */
135 tmp |= ((0x1 << 7) |
136 (ffe.ffe_rss_sel << 4) |
137 (ffe.ffe_cap_sel << 0));
138
139 mvs_write_port_vsr_data(mvi, phy_id, tmp);
140
141 /* R064h PHY Mode Register 1
142 *
143 * DFE_DIS 18
144 */
145 mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
146 tmp = mvs_read_port_vsr_data(mvi, phy_id);
147 tmp &= ~0x40001;
148 /* Hard coding */
149 /* No defines in HBA_Info_Page */
150 tmp |= (0 << 18);
151 mvs_write_port_vsr_data(mvi, phy_id, tmp);
152
153 /* R110h DFE F0-F1 Coefficient Control/DFE Update Control
154 *
155 * DFE_UPDATE_EN [11:6]
156 * DFE_FX_FORCE [5:0]
157 */
158 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
159 tmp = mvs_read_port_vsr_data(mvi, phy_id);
160 tmp &= ~0xFFF;
161 /* Hard coding */
162 /* No defines in HBA_Info_Page */
163 tmp |= ((0x3F << 6) | (0x0 << 0));
164 mvs_write_port_vsr_data(mvi, phy_id, tmp);
165
166 /* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
167 *
168 * FFE_TRAIN_EN 3
169 */
170 mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
171 tmp = mvs_read_port_vsr_data(mvi, phy_id);
172 tmp &= ~0x8;
173 /* Hard coding */
174 /* No defines in HBA_Info_Page */
175 tmp |= (0 << 3);
176 mvs_write_port_vsr_data(mvi, phy_id, tmp);
177}
178
179/*Notice: this function must be called when phy is disabled*/
180void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
181{
182 union reg_phy_cfg phy_cfg, phy_cfg_tmp;
183 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
184 phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
185 phy_cfg.v = 0;
186 phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
187 phy_cfg.u.sas_support = 1;
188 phy_cfg.u.sata_support = 1;
189 phy_cfg.u.sata_host_mode = 1;
190
191 switch (rate) {
192 case 0x0:
193 /* support 1.5 Gbps */
194 phy_cfg.u.speed_support = 1;
195 phy_cfg.u.snw_3_support = 0;
196 phy_cfg.u.tx_lnk_parity = 1;
197 phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
198 break;
199 case 0x1:
200
201 /* support 1.5, 3.0 Gbps */
202 phy_cfg.u.speed_support = 3;
203 phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
204 phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
205 break;
206 case 0x2:
207 default:
208 /* support 1.5, 3.0, 6.0 Gbps */
209 phy_cfg.u.speed_support = 7;
210 phy_cfg.u.snw_3_support = 1;
211 phy_cfg.u.tx_lnk_parity = 1;
212 phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
213 phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
214 break;
215 }
216 mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
217}
218
219static void __devinit
220mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
221{
222 u32 temp;
223 temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
224 if (temp == 0xFFFFFFFFL) {
225 mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
226 mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
227 mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
228 }
229
230 temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
231 if (temp == 0xFFL) {
232 switch (mvi->pdev->revision) {
233 case VANIR_A0_REV:
234 case VANIR_B0_REV:
235 mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
236 mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
237 break;
238 case VANIR_C0_REV:
239 case VANIR_C1_REV:
240 case VANIR_C2_REV:
241 default:
242 mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
243 mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
244 break;
245 }
246 }
247
248 temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
249 if (temp == 0xFFL)
250 /*set default phy_rate = 6Gbps*/
251 mvi->hba_info_param.phy_rate[phy_id] = 0x2;
252
253 set_phy_tuning(mvi, phy_id,
254 mvi->hba_info_param.phy_tuning[phy_id]);
255 set_phy_ffe_tuning(mvi, phy_id,
256 mvi->hba_info_param.ffe_ctl[phy_id]);
257 set_phy_rate(mvi, phy_id,
258 mvi->hba_info_param.phy_rate[phy_id]);
259}
260
51static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) 261static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
52{ 262{
53 void __iomem *regs = mvi->regs; 263 void __iomem *regs = mvi->regs;
@@ -61,7 +271,14 @@ static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
61static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) 271static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
62{ 272{
63 u32 tmp; 273 u32 tmp;
64 274 u32 delay = 5000;
275 if (hard == MVS_PHY_TUNE) {
276 mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
277 tmp = mvs_read_port_cfg_data(mvi, phy_id);
278 mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
279 mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
280 return;
281 }
65 tmp = mvs_read_port_irq_stat(mvi, phy_id); 282 tmp = mvs_read_port_irq_stat(mvi, phy_id);
66 tmp &= ~PHYEV_RDY_CH; 283 tmp &= ~PHYEV_RDY_CH;
67 mvs_write_port_irq_stat(mvi, phy_id, tmp); 284 mvs_write_port_irq_stat(mvi, phy_id, tmp);
@@ -71,12 +288,15 @@ static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
71 mvs_write_phy_ctl(mvi, phy_id, tmp); 288 mvs_write_phy_ctl(mvi, phy_id, tmp);
72 do { 289 do {
73 tmp = mvs_read_phy_ctl(mvi, phy_id); 290 tmp = mvs_read_phy_ctl(mvi, phy_id);
74 } while (tmp & PHY_RST_HARD); 291 udelay(10);
292 delay--;
293 } while ((tmp & PHY_RST_HARD) && delay);
294 if (!delay)
295 mv_dprintk("phy hard reset failed.\n");
75 } else { 296 } else {
76 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT); 297 tmp = mvs_read_phy_ctl(mvi, phy_id);
77 tmp = mvs_read_port_vsr_data(mvi, phy_id);
78 tmp |= PHY_RST; 298 tmp |= PHY_RST;
79 mvs_write_port_vsr_data(mvi, phy_id, tmp); 299 mvs_write_phy_ctl(mvi, phy_id, tmp);
80 } 300 }
81} 301}
82 302
@@ -90,12 +310,25 @@ static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
90 310
91static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) 311static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
92{ 312{
93 mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4); 313 u32 tmp;
94 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); 314 u8 revision = 0;
95 mvs_write_port_vsr_addr(mvi, phy_id, 0x104); 315
96 mvs_write_port_vsr_data(mvi, phy_id, 0x00018080); 316 revision = mvi->pdev->revision;
317 if (revision == VANIR_A0_REV) {
318 mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
319 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
320 }
321 if (revision == VANIR_B0_REV) {
322 mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
323 mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
324 mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
325 mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
326 }
327
97 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); 328 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
98 mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff); 329 tmp = mvs_read_port_vsr_data(mvi, phy_id);
330 tmp |= bit(0);
331 mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
99} 332}
100 333
101static int __devinit mvs_94xx_init(struct mvs_info *mvi) 334static int __devinit mvs_94xx_init(struct mvs_info *mvi)
@@ -103,7 +336,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
103 void __iomem *regs = mvi->regs; 336 void __iomem *regs = mvi->regs;
104 int i; 337 int i;
105 u32 tmp, cctl; 338 u32 tmp, cctl;
339 u8 revision;
106 340
341 revision = mvi->pdev->revision;
107 mvs_show_pcie_usage(mvi); 342 mvs_show_pcie_usage(mvi);
108 if (mvi->flags & MVF_FLAG_SOC) { 343 if (mvi->flags & MVF_FLAG_SOC) {
109 tmp = mr32(MVS_PHY_CTL); 344 tmp = mr32(MVS_PHY_CTL);
@@ -133,6 +368,28 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
133 msleep(100); 368 msleep(100);
134 } 369 }
135 370
371 /* disable Multiplexing, enable phy implemented */
372 mw32(MVS_PORTS_IMP, 0xFF);
373
374 if (revision == VANIR_A0_REV) {
375 mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
376 mw32(MVS_PA_VSR_PORT, 0x00018080);
377 }
378 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
379 if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
380 /* set 6G/3G/1.5G, multiplexing, without SSC */
381 mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
382 else
383 /* set 6G/3G/1.5G, multiplexing, with and without SSC */
384 mw32(MVS_PA_VSR_PORT, 0x0084fffe);
385
386 if (revision == VANIR_B0_REV) {
387 mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
388 mw32(MVS_PA_VSR_PORT, 0x08001006);
389 mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
390 mw32(MVS_PA_VSR_PORT, 0x0000705f);
391 }
392
136 /* reset control */ 393 /* reset control */
137 mw32(MVS_PCS, 0); /* MVS_PCS */ 394 mw32(MVS_PCS, 0); /* MVS_PCS */
138 mw32(MVS_STP_REG_SET_0, 0); 395 mw32(MVS_STP_REG_SET_0, 0);
@@ -141,17 +398,8 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
141 /* init phys */ 398 /* init phys */
142 mvs_phy_hacks(mvi); 399 mvs_phy_hacks(mvi);
143 400
144 /* disable Multiplexing, enable phy implemented */
145 mw32(MVS_PORTS_IMP, 0xFF);
146
147
148 mw32(MVS_PA_VSR_ADDR, 0x00000104);
149 mw32(MVS_PA_VSR_PORT, 0x00018080);
150 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
151 mw32(MVS_PA_VSR_PORT, 0x0084ffff);
152
153 /* set LED blink when IO*/ 401 /* set LED blink when IO*/
154 mw32(MVS_PA_VSR_ADDR, 0x00000030); 402 mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
155 tmp = mr32(MVS_PA_VSR_PORT); 403 tmp = mr32(MVS_PA_VSR_PORT);
156 tmp &= 0xFFFF00FF; 404 tmp &= 0xFFFF00FF;
157 tmp |= 0x00003300; 405 tmp |= 0x00003300;
@@ -175,12 +423,13 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
175 mvs_94xx_phy_disable(mvi, i); 423 mvs_94xx_phy_disable(mvi, i);
176 /* set phy local SAS address */ 424 /* set phy local SAS address */
177 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, 425 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
178 (mvi->phy[i].dev_sas_addr)); 426 cpu_to_le64(mvi->phy[i].dev_sas_addr));
179 427
180 mvs_94xx_enable_xmt(mvi, i); 428 mvs_94xx_enable_xmt(mvi, i);
429 mvs_94xx_config_reg_from_hba(mvi, i);
181 mvs_94xx_phy_enable(mvi, i); 430 mvs_94xx_phy_enable(mvi, i);
182 431
183 mvs_94xx_phy_reset(mvi, i, 1); 432 mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
184 msleep(500); 433 msleep(500);
185 mvs_94xx_detect_porttype(mvi, i); 434 mvs_94xx_detect_porttype(mvi, i);
186 } 435 }
@@ -211,16 +460,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
211 mvs_update_phyinfo(mvi, i, 1); 460 mvs_update_phyinfo(mvi, i, 1);
212 } 461 }
213 462
214 /* FIXME: update wide port bitmaps */
215
216 /* little endian for open address and command table, etc. */ 463 /* little endian for open address and command table, etc. */
217 /*
218 * it seems that ( from the spec ) turning on big-endian won't
219 * do us any good on big-endian machines, need further confirmation
220 */
221 cctl = mr32(MVS_CTL); 464 cctl = mr32(MVS_CTL);
222 cctl |= CCTL_ENDIAN_CMD; 465 cctl |= CCTL_ENDIAN_CMD;
223 cctl |= CCTL_ENDIAN_DATA;
224 cctl &= ~CCTL_ENDIAN_OPEN; 466 cctl &= ~CCTL_ENDIAN_OPEN;
225 cctl |= CCTL_ENDIAN_RSP; 467 cctl |= CCTL_ENDIAN_RSP;
226 mw32_f(MVS_CTL, cctl); 468 mw32_f(MVS_CTL, cctl);
@@ -228,15 +470,20 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
228 /* reset CMD queue */ 470 /* reset CMD queue */
229 tmp = mr32(MVS_PCS); 471 tmp = mr32(MVS_PCS);
230 tmp |= PCS_CMD_RST; 472 tmp |= PCS_CMD_RST;
473 tmp &= ~PCS_SELF_CLEAR;
231 mw32(MVS_PCS, tmp); 474 mw32(MVS_PCS, tmp);
232 /* interrupt coalescing may cause missing HW interrput in some case, 475 /*
233 * and the max count is 0x1ff, while our max slot is 0x200, 476 * the max count is 0x1ff, while our max slot is 0x200,
234 * it will make count 0. 477 * it will make count 0.
235 */ 478 */
236 tmp = 0; 479 tmp = 0;
237 mw32(MVS_INT_COAL, tmp); 480 if (MVS_CHIP_SLOT_SZ > 0x1ff)
481 mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
482 else
483 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
238 484
239 tmp = 0x100; 485 /* default interrupt coalescing time is 128us */
486 tmp = 0x10000 | interrupt_coalescing;
240 mw32(MVS_INT_COAL_TMOUT, tmp); 487 mw32(MVS_INT_COAL_TMOUT, tmp);
241 488
242 /* ladies and gentlemen, start your engines */ 489 /* ladies and gentlemen, start your engines */
@@ -249,7 +496,7 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
249 496
250 /* enable completion queue interrupt */ 497 /* enable completion queue interrupt */
251 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | 498 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
252 CINT_DMA_PCIE); 499 CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
253 tmp |= CINT_PHY_MASK; 500 tmp |= CINT_PHY_MASK;
254 mw32(MVS_INT_MASK, tmp); 501 mw32(MVS_INT_MASK, tmp);
255 502
@@ -332,13 +579,10 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
332 if (((stat & IRQ_SAS_A) && mvi->id == 0) || 579 if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
333 ((stat & IRQ_SAS_B) && mvi->id == 1)) { 580 ((stat & IRQ_SAS_B) && mvi->id == 1)) {
334 mw32_f(MVS_INT_STAT, CINT_DONE); 581 mw32_f(MVS_INT_STAT, CINT_DONE);
335 #ifndef MVS_USE_TASKLET 582
336 spin_lock(&mvi->lock); 583 spin_lock(&mvi->lock);
337 #endif
338 mvs_int_full(mvi); 584 mvs_int_full(mvi);
339 #ifndef MVS_USE_TASKLET
340 spin_unlock(&mvi->lock); 585 spin_unlock(&mvi->lock);
341 #endif
342 } 586 }
343 return IRQ_HANDLED; 587 return IRQ_HANDLED;
344} 588}
@@ -346,10 +590,48 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
346static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) 590static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
347{ 591{
348 u32 tmp; 592 u32 tmp;
349 mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32)); 593 tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
350 do { 594 if (tmp && 1 << (slot_idx % 32)) {
351 tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3)); 595 mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
352 } while (tmp & 1 << (slot_idx % 32)); 596 mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
597 1 << (slot_idx % 32));
598 do {
599 tmp = mvs_cr32(mvi,
600 MVS_COMMAND_ACTIVE + (slot_idx >> 3));
601 } while (tmp & 1 << (slot_idx % 32));
602 }
603}
604
605void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
606{
607 void __iomem *regs = mvi->regs;
608 u32 tmp;
609
610 if (clear_all) {
611 tmp = mr32(MVS_INT_STAT_SRS_0);
612 if (tmp) {
613 mv_dprintk("check SRS 0 %08X.\n", tmp);
614 mw32(MVS_INT_STAT_SRS_0, tmp);
615 }
616 tmp = mr32(MVS_INT_STAT_SRS_1);
617 if (tmp) {
618 mv_dprintk("check SRS 1 %08X.\n", tmp);
619 mw32(MVS_INT_STAT_SRS_1, tmp);
620 }
621 } else {
622 if (reg_set > 31)
623 tmp = mr32(MVS_INT_STAT_SRS_1);
624 else
625 tmp = mr32(MVS_INT_STAT_SRS_0);
626
627 if (tmp & (1 << (reg_set % 32))) {
628 mv_dprintk("register set 0x%x was stopped.\n", reg_set);
629 if (reg_set > 31)
630 mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
631 else
632 mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
633 }
634 }
353} 635}
354 636
355static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, 637static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
@@ -357,37 +639,56 @@ static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
357{ 639{
358 void __iomem *regs = mvi->regs; 640 void __iomem *regs = mvi->regs;
359 u32 tmp; 641 u32 tmp;
642 mvs_94xx_clear_srs_irq(mvi, 0, 1);
360 643
361 if (type == PORT_TYPE_SATA) { 644 tmp = mr32(MVS_INT_STAT);
362 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); 645 mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
363 mw32(MVS_INT_STAT_SRS_0, tmp);
364 }
365 mw32(MVS_INT_STAT, CINT_CI_STOP);
366 tmp = mr32(MVS_PCS) | 0xFF00; 646 tmp = mr32(MVS_PCS) | 0xFF00;
367 mw32(MVS_PCS, tmp); 647 mw32(MVS_PCS, tmp);
368} 648}
369 649
650static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
651{
652 void __iomem *regs = mvi->regs;
653 u32 err_0, err_1;
654 u8 i;
655 struct mvs_device *device;
656
657 err_0 = mr32(MVS_NON_NCQ_ERR_0);
658 err_1 = mr32(MVS_NON_NCQ_ERR_1);
659
660 mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
661 err_0, err_1);
662 for (i = 0; i < 32; i++) {
663 if (err_0 & bit(i)) {
664 device = mvs_find_dev_by_reg_set(mvi, i);
665 if (device)
666 mvs_release_task(mvi, device->sas_device);
667 }
668 if (err_1 & bit(i)) {
669 device = mvs_find_dev_by_reg_set(mvi, i+32);
670 if (device)
671 mvs_release_task(mvi, device->sas_device);
672 }
673 }
674
675 mw32(MVS_NON_NCQ_ERR_0, err_0);
676 mw32(MVS_NON_NCQ_ERR_1, err_1);
677}
678
370static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) 679static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
371{ 680{
372 void __iomem *regs = mvi->regs; 681 void __iomem *regs = mvi->regs;
373 u32 tmp;
374 u8 reg_set = *tfs; 682 u8 reg_set = *tfs;
375 683
376 if (*tfs == MVS_ID_NOT_MAPPED) 684 if (*tfs == MVS_ID_NOT_MAPPED)
377 return; 685 return;
378 686
379 mvi->sata_reg_set &= ~bit(reg_set); 687 mvi->sata_reg_set &= ~bit(reg_set);
380 if (reg_set < 32) { 688 if (reg_set < 32)
381 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); 689 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
382 tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set; 690 else
383 if (tmp) 691 w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
384 mw32(MVS_INT_STAT_SRS_0, tmp);
385 } else {
386 w_reg_set_enable(reg_set, mvi->sata_reg_set);
387 tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
388 if (tmp)
389 mw32(MVS_INT_STAT_SRS_1, tmp);
390 }
391 692
392 *tfs = MVS_ID_NOT_MAPPED; 693 *tfs = MVS_ID_NOT_MAPPED;
393 694
@@ -403,7 +704,7 @@ static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
403 return 0; 704 return 0;
404 705
405 i = mv_ffc64(mvi->sata_reg_set); 706 i = mv_ffc64(mvi->sata_reg_set);
406 if (i > 32) { 707 if (i >= 32) {
407 mvi->sata_reg_set |= bit(i); 708 mvi->sata_reg_set |= bit(i);
408 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); 709 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
409 *tfs = i; 710 *tfs = i;
@@ -422,9 +723,12 @@ static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
422 int i; 723 int i;
423 struct scatterlist *sg; 724 struct scatterlist *sg;
424 struct mvs_prd *buf_prd = prd; 725 struct mvs_prd *buf_prd = prd;
726 struct mvs_prd_imt im_len;
727 *(u32 *)&im_len = 0;
425 for_each_sg(scatter, sg, nr, i) { 728 for_each_sg(scatter, sg, nr, i) {
426 buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 729 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
427 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); 730 im_len.len = sg_dma_len(sg);
731 buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
428 buf_prd++; 732 buf_prd++;
429 } 733 }
430} 734}
@@ -433,7 +737,7 @@ static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
433{ 737{
434 u32 phy_st; 738 u32 phy_st;
435 phy_st = mvs_read_phy_ctl(mvi, i); 739 phy_st = mvs_read_phy_ctl(mvi, i);
436 if (phy_st & PHY_READY_MASK) /* phy ready */ 740 if (phy_st & PHY_READY_MASK)
437 return 1; 741 return 1;
438 return 0; 742 return 0;
439} 743}
@@ -447,7 +751,7 @@ static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
447 for (i = 0; i < 7; i++) { 751 for (i = 0; i < 7; i++) {
448 mvs_write_port_cfg_addr(mvi, port_id, 752 mvs_write_port_cfg_addr(mvi, port_id,
449 CONFIG_ID_FRAME0 + i * 4); 753 CONFIG_ID_FRAME0 + i * 4);
450 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); 754 id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
451 } 755 }
452 memcpy(id, id_frame, 28); 756 memcpy(id, id_frame, 28);
453} 757}
@@ -458,15 +762,13 @@ static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
458 int i; 762 int i;
459 u32 id_frame[7]; 763 u32 id_frame[7];
460 764
461 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
462 for (i = 0; i < 7; i++) { 765 for (i = 0; i < 7; i++) {
463 mvs_write_port_cfg_addr(mvi, port_id, 766 mvs_write_port_cfg_addr(mvi, port_id,
464 CONFIG_ATT_ID_FRAME0 + i * 4); 767 CONFIG_ATT_ID_FRAME0 + i * 4);
465 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); 768 id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
466 mv_dprintk("94xx phy %d atta frame %d %x.\n", 769 mv_dprintk("94xx phy %d atta frame %d %x.\n",
467 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); 770 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
468 } 771 }
469 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
470 memcpy(id, id_frame, 28); 772 memcpy(id, id_frame, 28);
471} 773}
472 774
@@ -526,7 +828,18 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
526void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, 828void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
527 struct sas_phy_linkrates *rates) 829 struct sas_phy_linkrates *rates)
528{ 830{
529 /* TODO */ 831 u32 lrmax = 0;
832 u32 tmp;
833
834 tmp = mvs_read_phy_ctl(mvi, phy_id);
835 lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;
836
837 if (lrmax) {
838 tmp &= ~(0x3 << 12);
839 tmp |= lrmax;
840 }
841 mvs_write_phy_ctl(mvi, phy_id, tmp);
842 mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
530} 843}
531 844
532static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) 845static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
@@ -603,27 +916,59 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
603 return -1; 916 return -1;
604} 917}
605 918
606#ifndef DISABLE_HOTPLUG_DMA_FIX 919void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
607void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) 920 int buf_len, int from, void *prd)
608{ 921{
609 int i; 922 int i;
610 struct mvs_prd *buf_prd = prd; 923 struct mvs_prd *buf_prd = prd;
924 dma_addr_t buf_dma;
925 struct mvs_prd_imt im_len;
926
927 *(u32 *)&im_len = 0;
611 buf_prd += from; 928 buf_prd += from;
612 for (i = 0; i < MAX_SG_ENTRY - from; i++) { 929
613 buf_prd->addr = cpu_to_le64(buf_dma); 930#define PRD_CHAINED_ENTRY 0x01
614 buf_prd->im_len.len = cpu_to_le32(buf_len); 931 if ((mvi->pdev->revision == VANIR_A0_REV) ||
615 ++buf_prd; 932 (mvi->pdev->revision == VANIR_B0_REV))
933 buf_dma = (phy_mask <= 0x08) ?
934 mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
935 else
936 return;
937
938 for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
939 if (i == MAX_SG_ENTRY - 1) {
940 buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
941 im_len.len = 2;
942 im_len.misc_ctl = PRD_CHAINED_ENTRY;
943 } else {
944 buf_prd->addr = cpu_to_le64(buf_dma);
945 im_len.len = buf_len;
946 }
947 buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
616 } 948 }
617} 949}
618#endif
619 950
620/* 951static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
621 * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
622 * with 64xx fixes
623 */
624static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
625 u8 clear_all)
626{ 952{
953 void __iomem *regs = mvi->regs;
954 u32 tmp = 0;
955 /*
956 * the max count is 0x1ff, while our max slot is 0x200,
957 * it will make count 0.
958 */
959 if (time == 0) {
960 mw32(MVS_INT_COAL, 0);
961 mw32(MVS_INT_COAL_TMOUT, 0x10000);
962 } else {
963 if (MVS_CHIP_SLOT_SZ > 0x1ff)
964 mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
965 else
966 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
967
968 tmp = 0x10000 | time;
969 mw32(MVS_INT_COAL_TMOUT, tmp);
970 }
971
627} 972}
628 973
629const struct mvs_dispatch mvs_94xx_dispatch = { 974const struct mvs_dispatch mvs_94xx_dispatch = {
@@ -648,7 +993,6 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
648 mvs_write_port_irq_stat, 993 mvs_write_port_irq_stat,
649 mvs_read_port_irq_mask, 994 mvs_read_port_irq_mask,
650 mvs_write_port_irq_mask, 995 mvs_write_port_irq_mask,
651 mvs_get_sas_addr,
652 mvs_94xx_command_active, 996 mvs_94xx_command_active,
653 mvs_94xx_clear_srs_irq, 997 mvs_94xx_clear_srs_irq,
654 mvs_94xx_issue_stop, 998 mvs_94xx_issue_stop,
@@ -676,8 +1020,8 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
676 mvs_94xx_spi_buildcmd, 1020 mvs_94xx_spi_buildcmd,
677 mvs_94xx_spi_issuecmd, 1021 mvs_94xx_spi_issuecmd,
678 mvs_94xx_spi_waitdataready, 1022 mvs_94xx_spi_waitdataready,
679#ifndef DISABLE_HOTPLUG_DMA_FIX
680 mvs_94xx_fix_dma, 1023 mvs_94xx_fix_dma,
681#endif 1024 mvs_94xx_tune_interrupt,
1025 mvs_94xx_non_spec_ncq_error,
682}; 1026};
683 1027
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 8835befe2c0e..8f7eb4f21140 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -30,6 +30,14 @@
30 30
31#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS 31#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
32 32
33enum VANIR_REVISION_ID {
34 VANIR_A0_REV = 0xA0,
35 VANIR_B0_REV = 0x01,
36 VANIR_C0_REV = 0x02,
37 VANIR_C1_REV = 0x03,
38 VANIR_C2_REV = 0xC2,
39};
40
33enum hw_registers { 41enum hw_registers {
34 MVS_GBL_CTL = 0x04, /* global control */ 42 MVS_GBL_CTL = 0x04, /* global control */
35 MVS_GBL_INT_STAT = 0x00, /* global irq status */ 43 MVS_GBL_INT_STAT = 0x00, /* global irq status */
@@ -101,6 +109,7 @@ enum hw_registers {
101 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */ 109 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
102 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */ 110 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
103 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */ 111 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
112 MVS_COMMAND_ACTIVE = 0x300,
104}; 113};
105 114
106enum pci_cfg_registers { 115enum pci_cfg_registers {
@@ -112,26 +121,29 @@ enum pci_cfg_registers {
112 121
113/* SAS/SATA Vendor Specific Port Registers */ 122/* SAS/SATA Vendor Specific Port Registers */
114enum sas_sata_vsp_regs { 123enum sas_sata_vsp_regs {
115 VSR_PHY_STAT = 0x00 * 4, /* Phy Status */ 124 VSR_PHY_STAT = 0x00 * 4, /* Phy Interrupt Status */
116 VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */ 125 VSR_PHY_MODE1 = 0x01 * 4, /* phy Interrupt Enable */
117 VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */ 126 VSR_PHY_MODE2 = 0x02 * 4, /* Phy Configuration */
118 VSR_PHY_MODE3 = 0x03 * 4, /* pll */ 127 VSR_PHY_MODE3 = 0x03 * 4, /* Phy Status */
119 VSR_PHY_MODE4 = 0x04 * 4, /* VCO */ 128 VSR_PHY_MODE4 = 0x04 * 4, /* Phy Counter 0 */
120 VSR_PHY_MODE5 = 0x05 * 4, /* Rx */ 129 VSR_PHY_MODE5 = 0x05 * 4, /* Phy Counter 1 */
121 VSR_PHY_MODE6 = 0x06 * 4, /* CDR */ 130 VSR_PHY_MODE6 = 0x06 * 4, /* Event Counter Control */
122 VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */ 131 VSR_PHY_MODE7 = 0x07 * 4, /* Event Counter Select */
123 VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */ 132 VSR_PHY_MODE8 = 0x08 * 4, /* Event Counter 0 */
124 VSR_PHY_MODE9 = 0x09 * 4, /* Test */ 133 VSR_PHY_MODE9 = 0x09 * 4, /* Event Counter 1 */
125 VSR_PHY_MODE10 = 0x0A * 4, /* Power */ 134 VSR_PHY_MODE10 = 0x0A * 4, /* Event Counter 2 */
126 VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */ 135 VSR_PHY_MODE11 = 0x0B * 4, /* Event Counter 3 */
127 VSR_PHY_VS0 = 0x0C * 4, /* Vednor Specific 0 */ 136 VSR_PHY_ACT_LED = 0x0C * 4, /* Activity LED control */
128 VSR_PHY_VS1 = 0x0D * 4, /* Vednor Specific 1 */ 137
138 VSR_PHY_FFE_CONTROL = 0x10C,
139 VSR_PHY_DFE_UPDATE_CRTL = 0x110,
140 VSR_REF_CLOCK_CRTL = 0x1A0,
129}; 141};
130 142
131enum chip_register_bits { 143enum chip_register_bits {
132 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), 144 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
133 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), 145 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 12),
134 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12), 146 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
135 PHY_NEG_SPP_PHYS_LINK_RATE_MASK = 147 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
136 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), 148 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
137}; 149};
@@ -169,22 +181,75 @@ enum pci_interrupt_cause {
169 IRQ_PCIE_ERR = (1 << 31), 181 IRQ_PCIE_ERR = (1 << 31),
170}; 182};
171 183
184union reg_phy_cfg {
185 u32 v;
186 struct {
187 u32 phy_reset:1;
188 u32 sas_support:1;
189 u32 sata_support:1;
190 u32 sata_host_mode:1;
191 /*
192 * bit 2: 6Gbps support
193 * bit 1: 3Gbps support
194 * bit 0: 1.5Gbps support
195 */
196 u32 speed_support:3;
197 u32 snw_3_support:1;
198 u32 tx_lnk_parity:1;
199 /*
200 * bit 5: G1 (1.5Gbps) Without SSC
201 * bit 4: G1 (1.5Gbps) with SSC
202 * bit 3: G2 (3.0Gbps) Without SSC
203 * bit 2: G2 (3.0Gbps) with SSC
204 * bit 1: G3 (6.0Gbps) without SSC
205 * bit 0: G3 (6.0Gbps) with SSC
206 */
207 u32 tx_spt_phs_lnk_rate:6;
208 /* 8h: 1.5Gbps 9h: 3Gbps Ah: 6Gbps */
209 u32 tx_lgcl_lnk_rate:4;
210 u32 tx_ssc_type:1;
211 u32 sata_spin_up_spt:1;
212 u32 sata_spin_up_en:1;
213 u32 bypass_oob:1;
214 u32 disable_phy:1;
215 u32 rsvd:8;
216 } u;
217};
218
172#define MAX_SG_ENTRY 255 219#define MAX_SG_ENTRY 255
173 220
174struct mvs_prd_imt { 221struct mvs_prd_imt {
222#ifndef __BIG_ENDIAN
175 __le32 len:22; 223 __le32 len:22;
176 u8 _r_a:2; 224 u8 _r_a:2;
177 u8 misc_ctl:4; 225 u8 misc_ctl:4;
178 u8 inter_sel:4; 226 u8 inter_sel:4;
227#else
228 u32 inter_sel:4;
229 u32 misc_ctl:4;
230 u32 _r_a:2;
231 u32 len:22;
232#endif
179}; 233};
180 234
181struct mvs_prd { 235struct mvs_prd {
182 /* 64-bit buffer address */ 236 /* 64-bit buffer address */
183 __le64 addr; 237 __le64 addr;
184 /* 22-bit length */ 238 /* 22-bit length */
185 struct mvs_prd_imt im_len; 239 __le32 im_len;
186} __attribute__ ((packed)); 240} __attribute__ ((packed));
187 241
242/*
243 * these registers are accessed through port vendor
244 * specific address/data registers
245 */
246enum sas_sata_phy_regs {
247 GENERATION_1_SETTING = 0x118,
248 GENERATION_1_2_SETTING = 0x11C,
249 GENERATION_2_3_SETTING = 0x120,
250 GENERATION_3_4_SETTING = 0x124,
251};
252
188#define SPI_CTRL_REG_94XX 0xc800 253#define SPI_CTRL_REG_94XX 0xc800
189#define SPI_ADDR_REG_94XX 0xc804 254#define SPI_ADDR_REG_94XX 0xc804
190#define SPI_WR_DATA_REG_94XX 0xc808 255#define SPI_WR_DATA_REG_94XX 0xc808
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index 1753a6fc42d0..bcc408042cee 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -164,7 +164,6 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
164{ 164{
165 u32 tmp; 165 u32 tmp;
166 166
167 /* workaround for SATA R-ERR, to ignore phy glitch */
168 tmp = mvs_cr32(mvi, CMD_PHY_TIMER); 167 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
169 tmp &= ~(1 << 9); 168 tmp &= ~(1 << 9);
170 tmp |= (1 << 10); 169 tmp |= (1 << 10);
@@ -179,23 +178,10 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
179 tmp |= 0x3fff; 178 tmp |= 0x3fff;
180 mvs_cw32(mvi, CMD_SAS_CTL0, tmp); 179 mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
181 180
182 /* workaround for WDTIMEOUT , set to 550 ms */
183 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000); 181 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
184 182
185 /* not to halt for different port op during wideport link change */ 183 /* not to halt for different port op during wideport link change */
186 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d); 184 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
187
188 /* workaround for Seagate disk not-found OOB sequence, recv
189 * COMINIT before sending out COMWAKE */
190 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
191 tmp &= 0x0000ffff;
192 tmp |= 0x00fa0000;
193 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
194
195 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
196 tmp &= 0x1fffffff;
197 tmp |= (2U << 29); /* 8 ms retry */
198 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
199} 185}
200 186
201static inline void mvs_int_sata(struct mvs_info *mvi) 187static inline void mvs_int_sata(struct mvs_info *mvi)
@@ -223,6 +209,9 @@ static inline void mvs_int_full(struct mvs_info *mvi)
223 mvs_int_port(mvi, i, tmp); 209 mvs_int_port(mvi, i, tmp);
224 } 210 }
225 211
212 if (stat & CINT_NON_SPEC_NCQ_ERROR)
213 MVS_CHIP_DISP->non_spec_ncq_error(mvi);
214
226 if (stat & CINT_SRS) 215 if (stat & CINT_SRS)
227 mvs_int_sata(mvi); 216 mvs_int_sata(mvi);
228 217
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index bc00c940743c..dec7cadb7485 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -43,7 +43,6 @@ enum chip_flavors {
43 43
44/* driver compile-time configuration */ 44/* driver compile-time configuration */
45enum driver_configuration { 45enum driver_configuration {
46 MVS_SLOTS = 512, /* command slots */
47 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ 46 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
48 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ 47 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
49 /* software requires power-of-2 48 /* software requires power-of-2
@@ -56,8 +55,7 @@ enum driver_configuration {
56 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ 55 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
57 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ 56 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
58 MVS_OAF_SZ = 64, /* Open address frame buffer size */ 57 MVS_OAF_SZ = 64, /* Open address frame buffer size */
59 MVS_QUEUE_SIZE = 32, /* Support Queue depth */ 58 MVS_QUEUE_SIZE = 64, /* Support Queue depth */
60 MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
61 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, 59 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
62}; 60};
63 61
@@ -144,6 +142,7 @@ enum hw_register_bits {
144 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ 142 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
145 CINT_MEM = (1U << 26), /* int mem parity err */ 143 CINT_MEM = (1U << 26), /* int mem parity err */
146 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ 144 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
145 CINT_NON_SPEC_NCQ_ERROR = (1U << 25), /* Non specific NCQ error */
147 CINT_SRS = (1U << 3), /* SRS event */ 146 CINT_SRS = (1U << 3), /* SRS event */
148 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ 147 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
149 CINT_DONE = (1U << 0), /* cmd completion */ 148 CINT_DONE = (1U << 0), /* cmd completion */
@@ -161,7 +160,7 @@ enum hw_register_bits {
161 TXQ_CMD_SSP = 1, /* SSP protocol */ 160 TXQ_CMD_SSP = 1, /* SSP protocol */
162 TXQ_CMD_SMP = 2, /* SMP protocol */ 161 TXQ_CMD_SMP = 2, /* SMP protocol */
163 TXQ_CMD_STP = 3, /* STP/SATA protocol */ 162 TXQ_CMD_STP = 3, /* STP/SATA protocol */
164 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ 163 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP target free list */
165 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ 164 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
166 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ 165 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
167 TXQ_MODE_TARGET = 0, 166 TXQ_MODE_TARGET = 0,
@@ -391,15 +390,15 @@ enum sas_cmd_port_registers {
391}; 390};
392 391
393enum mvs_info_flags { 392enum mvs_info_flags {
394 MVF_MSI = (1U << 0), /* MSI is enabled */
395 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ 393 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
396 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */ 394 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
397}; 395};
398 396
399enum mvs_event_flags { 397enum mvs_event_flags {
400 PHY_PLUG_EVENT = (3U), 398 PHY_PLUG_EVENT = (3U),
401 PHY_PLUG_IN = (1U << 0), /* phy plug in */ 399 PHY_PLUG_IN = (1U << 0), /* phy plug in */
402 PHY_PLUG_OUT = (1U << 1), /* phy plug out */ 400 PHY_PLUG_OUT = (1U << 1), /* phy plug out */
401 EXP_BRCT_CHG = (1U << 2), /* broadcast change */
403}; 402};
404 403
405enum mvs_port_type { 404enum mvs_port_type {
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 90b636611cde..4e9af66fd1d3 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -34,22 +34,25 @@ MODULE_PARM_DESC(collector, "\n"
34 "\tThe mvsas SAS LLDD supports both modes.\n" 34 "\tThe mvsas SAS LLDD supports both modes.\n"
35 "\tDefault: 1 (Direct Mode).\n"); 35 "\tDefault: 1 (Direct Mode).\n");
36 36
37int interrupt_coalescing = 0x80;
38
37static struct scsi_transport_template *mvs_stt; 39static struct scsi_transport_template *mvs_stt;
38struct kmem_cache *mvs_task_list_cache; 40struct kmem_cache *mvs_task_list_cache;
39static const struct mvs_chip_info mvs_chips[] = { 41static const struct mvs_chip_info mvs_chips[] = {
40 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 42 [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
41 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 43 [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
42 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, 44 [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
43 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 45 [chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
44 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 46 [chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
45 [chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, 47 [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
46 [chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, 48 [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
47 [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 49 [chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
48 [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 50 [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
49}; 51};
50 52
53struct device_attribute *mvst_host_attrs[];
54
51#define SOC_SAS_NUM 2 55#define SOC_SAS_NUM 2
52#define SG_MX 64
53 56
54static struct scsi_host_template mvs_sht = { 57static struct scsi_host_template mvs_sht = {
55 .module = THIS_MODULE, 58 .module = THIS_MODULE,
@@ -66,7 +69,7 @@ static struct scsi_host_template mvs_sht = {
66 .can_queue = 1, 69 .can_queue = 1,
67 .cmd_per_lun = 1, 70 .cmd_per_lun = 1,
68 .this_id = -1, 71 .this_id = -1,
69 .sg_tablesize = SG_MX, 72 .sg_tablesize = SG_ALL,
70 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 73 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
71 .use_clustering = ENABLE_CLUSTERING, 74 .use_clustering = ENABLE_CLUSTERING,
72 .eh_device_reset_handler = sas_eh_device_reset_handler, 75 .eh_device_reset_handler = sas_eh_device_reset_handler,
@@ -74,6 +77,7 @@ static struct scsi_host_template mvs_sht = {
74 .slave_alloc = mvs_slave_alloc, 77 .slave_alloc = mvs_slave_alloc,
75 .target_destroy = sas_target_destroy, 78 .target_destroy = sas_target_destroy,
76 .ioctl = sas_ioctl, 79 .ioctl = sas_ioctl,
80 .shost_attrs = mvst_host_attrs,
77}; 81};
78 82
79static struct sas_domain_function_template mvs_transport_ops = { 83static struct sas_domain_function_template mvs_transport_ops = {
@@ -100,6 +104,7 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
100 struct asd_sas_phy *sas_phy = &phy->sas_phy; 104 struct asd_sas_phy *sas_phy = &phy->sas_phy;
101 105
102 phy->mvi = mvi; 106 phy->mvi = mvi;
107 phy->port = NULL;
103 init_timer(&phy->timer); 108 init_timer(&phy->timer);
104 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; 109 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
105 sas_phy->class = SAS; 110 sas_phy->class = SAS;
@@ -128,7 +133,7 @@ static void mvs_free(struct mvs_info *mvi)
128 if (mvi->flags & MVF_FLAG_SOC) 133 if (mvi->flags & MVF_FLAG_SOC)
129 slot_nr = MVS_SOC_SLOTS; 134 slot_nr = MVS_SOC_SLOTS;
130 else 135 else
131 slot_nr = MVS_SLOTS; 136 slot_nr = MVS_CHIP_SLOT_SZ;
132 137
133 if (mvi->dma_pool) 138 if (mvi->dma_pool)
134 pci_pool_destroy(mvi->dma_pool); 139 pci_pool_destroy(mvi->dma_pool);
@@ -148,25 +153,26 @@ static void mvs_free(struct mvs_info *mvi)
148 dma_free_coherent(mvi->dev, 153 dma_free_coherent(mvi->dev,
149 sizeof(*mvi->slot) * slot_nr, 154 sizeof(*mvi->slot) * slot_nr,
150 mvi->slot, mvi->slot_dma); 155 mvi->slot, mvi->slot_dma);
151#ifndef DISABLE_HOTPLUG_DMA_FIX 156
152 if (mvi->bulk_buffer) 157 if (mvi->bulk_buffer)
153 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, 158 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
154 mvi->bulk_buffer, mvi->bulk_buffer_dma); 159 mvi->bulk_buffer, mvi->bulk_buffer_dma);
155#endif 160 if (mvi->bulk_buffer1)
161 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
162 mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
156 163
157 MVS_CHIP_DISP->chip_iounmap(mvi); 164 MVS_CHIP_DISP->chip_iounmap(mvi);
158 if (mvi->shost) 165 if (mvi->shost)
159 scsi_host_put(mvi->shost); 166 scsi_host_put(mvi->shost);
160 list_for_each_entry(mwq, &mvi->wq_list, entry) 167 list_for_each_entry(mwq, &mvi->wq_list, entry)
161 cancel_delayed_work(&mwq->work_q); 168 cancel_delayed_work(&mwq->work_q);
169 kfree(mvi->tags);
162 kfree(mvi); 170 kfree(mvi);
163} 171}
164 172
165#ifdef MVS_USE_TASKLET 173#ifdef CONFIG_SCSI_MVSAS_TASKLET
166struct tasklet_struct mv_tasklet;
167static void mvs_tasklet(unsigned long opaque) 174static void mvs_tasklet(unsigned long opaque)
168{ 175{
169 unsigned long flags;
170 u32 stat; 176 u32 stat;
171 u16 core_nr, i = 0; 177 u16 core_nr, i = 0;
172 178
@@ -179,35 +185,49 @@ static void mvs_tasklet(unsigned long opaque)
179 if (unlikely(!mvi)) 185 if (unlikely(!mvi))
180 BUG_ON(1); 186 BUG_ON(1);
181 187
188 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
189 if (!stat)
190 goto out;
191
182 for (i = 0; i < core_nr; i++) { 192 for (i = 0; i < core_nr; i++) {
183 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 193 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
184 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq); 194 MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
185 if (stat)
186 MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
187 } 195 }
196out:
197 MVS_CHIP_DISP->interrupt_enable(mvi);
188 198
189} 199}
190#endif 200#endif
191 201
192static irqreturn_t mvs_interrupt(int irq, void *opaque) 202static irqreturn_t mvs_interrupt(int irq, void *opaque)
193{ 203{
194 u32 core_nr, i = 0; 204 u32 core_nr;
195 u32 stat; 205 u32 stat;
196 struct mvs_info *mvi; 206 struct mvs_info *mvi;
197 struct sas_ha_struct *sha = opaque; 207 struct sas_ha_struct *sha = opaque;
208#ifndef CONFIG_SCSI_MVSAS_TASKLET
209 u32 i;
210#endif
198 211
199 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 212 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
200 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; 213 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
201 214
202 if (unlikely(!mvi)) 215 if (unlikely(!mvi))
203 return IRQ_NONE; 216 return IRQ_NONE;
217#ifdef CONFIG_SCSI_MVSAS_TASKLET
218 MVS_CHIP_DISP->interrupt_disable(mvi);
219#endif
204 220
205 stat = MVS_CHIP_DISP->isr_status(mvi, irq); 221 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
206 if (!stat) 222 if (!stat) {
223 #ifdef CONFIG_SCSI_MVSAS_TASKLET
224 MVS_CHIP_DISP->interrupt_enable(mvi);
225 #endif
207 return IRQ_NONE; 226 return IRQ_NONE;
227 }
208 228
209#ifdef MVS_USE_TASKLET 229#ifdef CONFIG_SCSI_MVSAS_TASKLET
210 tasklet_schedule(&mv_tasklet); 230 tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
211#else 231#else
212 for (i = 0; i < core_nr; i++) { 232 for (i = 0; i < core_nr; i++) {
213 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 233 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
@@ -225,7 +245,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
225 if (mvi->flags & MVF_FLAG_SOC) 245 if (mvi->flags & MVF_FLAG_SOC)
226 slot_nr = MVS_SOC_SLOTS; 246 slot_nr = MVS_SOC_SLOTS;
227 else 247 else
228 slot_nr = MVS_SLOTS; 248 slot_nr = MVS_CHIP_SLOT_SZ;
229 249
230 spin_lock_init(&mvi->lock); 250 spin_lock_init(&mvi->lock);
231 for (i = 0; i < mvi->chip->n_phy; i++) { 251 for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -273,13 +293,18 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
273 goto err_out; 293 goto err_out;
274 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr); 294 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
275 295
276#ifndef DISABLE_HOTPLUG_DMA_FIX
277 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, 296 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
278 TRASH_BUCKET_SIZE, 297 TRASH_BUCKET_SIZE,
279 &mvi->bulk_buffer_dma, GFP_KERNEL); 298 &mvi->bulk_buffer_dma, GFP_KERNEL);
280 if (!mvi->bulk_buffer) 299 if (!mvi->bulk_buffer)
281 goto err_out; 300 goto err_out;
282#endif 301
302 mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
303 TRASH_BUCKET_SIZE,
304 &mvi->bulk_buffer_dma1, GFP_KERNEL);
305 if (!mvi->bulk_buffer1)
306 goto err_out;
307
283 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id); 308 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
284 mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0); 309 mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
285 if (!mvi->dma_pool) { 310 if (!mvi->dma_pool) {
@@ -354,11 +379,12 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
354 const struct pci_device_id *ent, 379 const struct pci_device_id *ent,
355 struct Scsi_Host *shost, unsigned int id) 380 struct Scsi_Host *shost, unsigned int id)
356{ 381{
357 struct mvs_info *mvi; 382 struct mvs_info *mvi = NULL;
358 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 383 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
359 384
360 mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info), 385 mvi = kzalloc(sizeof(*mvi) +
361 GFP_KERNEL); 386 (1L << mvs_chips[ent->driver_data].slot_width) *
387 sizeof(struct mvs_slot_info), GFP_KERNEL);
362 if (!mvi) 388 if (!mvi)
363 return NULL; 389 return NULL;
364 390
@@ -367,7 +393,6 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
367 mvi->chip_id = ent->driver_data; 393 mvi->chip_id = ent->driver_data;
368 mvi->chip = &mvs_chips[mvi->chip_id]; 394 mvi->chip = &mvs_chips[mvi->chip_id];
369 INIT_LIST_HEAD(&mvi->wq_list); 395 INIT_LIST_HEAD(&mvi->wq_list);
370 mvi->irq = pdev->irq;
371 396
372 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; 397 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
373 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; 398 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
@@ -375,9 +400,10 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
375 mvi->id = id; 400 mvi->id = id;
376 mvi->sas = sha; 401 mvi->sas = sha;
377 mvi->shost = shost; 402 mvi->shost = shost;
378#ifdef MVS_USE_TASKLET 403
379 tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha); 404 mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL);
380#endif 405 if (!mvi->tags)
406 goto err_out;
381 407
382 if (MVS_CHIP_DISP->chip_ioremap(mvi)) 408 if (MVS_CHIP_DISP->chip_ioremap(mvi))
383 goto err_out; 409 goto err_out;
@@ -388,7 +414,6 @@ err_out:
388 return NULL; 414 return NULL;
389} 415}
390 416
391/* move to PCI layer or libata core? */
392static int pci_go_64(struct pci_dev *pdev) 417static int pci_go_64(struct pci_dev *pdev)
393{ 418{
394 int rc; 419 int rc;
@@ -450,7 +475,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
450 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; 475 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
451 476
452 shost->transportt = mvs_stt; 477 shost->transportt = mvs_stt;
453 shost->max_id = 128; 478 shost->max_id = MVS_MAX_DEVICES;
454 shost->max_lun = ~0; 479 shost->max_lun = ~0;
455 shost->max_channel = 1; 480 shost->max_channel = 1;
456 shost->max_cmd_len = 16; 481 shost->max_cmd_len = 16;
@@ -493,11 +518,12 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
493 if (mvi->flags & MVF_FLAG_SOC) 518 if (mvi->flags & MVF_FLAG_SOC)
494 can_queue = MVS_SOC_CAN_QUEUE; 519 can_queue = MVS_SOC_CAN_QUEUE;
495 else 520 else
496 can_queue = MVS_CAN_QUEUE; 521 can_queue = MVS_CHIP_SLOT_SZ;
497 522
498 sha->lldd_queue_size = can_queue; 523 sha->lldd_queue_size = can_queue;
524 shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
499 shost->can_queue = can_queue; 525 shost->can_queue = can_queue;
500 mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys; 526 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
501 sha->core.shost = mvi->shost; 527 sha->core.shost = mvi->shost;
502} 528}
503 529
@@ -518,6 +544,7 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
518{ 544{
519 unsigned int rc, nhost = 0; 545 unsigned int rc, nhost = 0;
520 struct mvs_info *mvi; 546 struct mvs_info *mvi;
547 struct mvs_prv_info *mpi;
521 irq_handler_t irq_handler = mvs_interrupt; 548 irq_handler_t irq_handler = mvs_interrupt;
522 struct Scsi_Host *shost = NULL; 549 struct Scsi_Host *shost = NULL;
523 const struct mvs_chip_info *chip; 550 const struct mvs_chip_info *chip;
@@ -569,6 +596,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
569 goto err_out_regions; 596 goto err_out_regions;
570 } 597 }
571 598
599 memset(&mvi->hba_info_param, 0xFF,
600 sizeof(struct hba_info_page));
601
572 mvs_init_sas_add(mvi); 602 mvs_init_sas_add(mvi);
573 603
574 mvi->instance = nhost; 604 mvi->instance = nhost;
@@ -579,8 +609,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
579 } 609 }
580 nhost++; 610 nhost++;
581 } while (nhost < chip->n_host); 611 } while (nhost < chip->n_host);
582#ifdef MVS_USE_TASKLET 612 mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
583 tasklet_init(&mv_tasklet, mvs_tasklet, 613#ifdef CONFIG_SCSI_MVSAS_TASKLET
614 tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
584 (unsigned long)SHOST_TO_SAS_HA(shost)); 615 (unsigned long)SHOST_TO_SAS_HA(shost));
585#endif 616#endif
586 617
@@ -625,8 +656,8 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
625 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 656 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
626 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; 657 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
627 658
628#ifdef MVS_USE_TASKLET 659#ifdef CONFIG_SCSI_MVSAS_TASKLET
629 tasklet_kill(&mv_tasklet); 660 tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
630#endif 661#endif
631 662
632 pci_set_drvdata(pdev, NULL); 663 pci_set_drvdata(pdev, NULL);
@@ -635,7 +666,7 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
635 scsi_remove_host(mvi->shost); 666 scsi_remove_host(mvi->shost);
636 667
637 MVS_CHIP_DISP->interrupt_disable(mvi); 668 MVS_CHIP_DISP->interrupt_disable(mvi);
638 free_irq(mvi->irq, sha); 669 free_irq(mvi->pdev->irq, sha);
639 for (i = 0; i < core_nr; i++) { 670 for (i = 0; i < core_nr; i++) {
640 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 671 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
641 mvs_free(mvi); 672 mvs_free(mvi);
@@ -703,6 +734,70 @@ static struct pci_driver mvs_pci_driver = {
703 .remove = __devexit_p(mvs_pci_remove), 734 .remove = __devexit_p(mvs_pci_remove),
704}; 735};
705 736
737static ssize_t
738mvs_show_driver_version(struct device *cdev,
739 struct device_attribute *attr, char *buffer)
740{
741 return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
742}
743
744static DEVICE_ATTR(driver_version,
745 S_IRUGO,
746 mvs_show_driver_version,
747 NULL);
748
749static ssize_t
750mvs_store_interrupt_coalescing(struct device *cdev,
751 struct device_attribute *attr,
752 const char *buffer, size_t size)
753{
754 int val = 0;
755 struct mvs_info *mvi = NULL;
756 struct Scsi_Host *shost = class_to_shost(cdev);
757 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
758 u8 i, core_nr;
759 if (buffer == NULL)
760 return size;
761
762 if (sscanf(buffer, "%d", &val) != 1)
763 return -EINVAL;
764
765 if (val >= 0x10000) {
766 mv_dprintk("interrupt coalescing timer %d us is"
767 "too long\n", val);
768 return strlen(buffer);
769 }
770
771 interrupt_coalescing = val;
772
773 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
774 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
775
776 if (unlikely(!mvi))
777 return -EINVAL;
778
779 for (i = 0; i < core_nr; i++) {
780 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
781 if (MVS_CHIP_DISP->tune_interrupt)
782 MVS_CHIP_DISP->tune_interrupt(mvi,
783 interrupt_coalescing);
784 }
785 mv_dprintk("set interrupt coalescing time to %d us\n",
786 interrupt_coalescing);
787 return strlen(buffer);
788}
789
790static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
791 struct device_attribute *attr, char *buffer)
792{
793 return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
794}
795
796static DEVICE_ATTR(interrupt_coalescing,
797 S_IRUGO|S_IWUSR,
798 mvs_show_interrupt_coalescing,
799 mvs_store_interrupt_coalescing);
800
706/* task handler */ 801/* task handler */
707struct task_struct *mvs_th; 802struct task_struct *mvs_th;
708static int __init mvs_init(void) 803static int __init mvs_init(void)
@@ -739,6 +834,12 @@ static void __exit mvs_exit(void)
739 kmem_cache_destroy(mvs_task_list_cache); 834 kmem_cache_destroy(mvs_task_list_cache);
740} 835}
741 836
837struct device_attribute *mvst_host_attrs[] = {
838 &dev_attr_driver_version,
839 &dev_attr_interrupt_coalescing,
840 NULL,
841};
842
742module_init(mvs_init); 843module_init(mvs_init);
743module_exit(mvs_exit); 844module_exit(mvs_exit);
744 845
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0ef27425c447..4958fefff365 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -38,7 +38,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
38 38
39void mvs_tag_clear(struct mvs_info *mvi, u32 tag) 39void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
40{ 40{
41 void *bitmap = &mvi->tags; 41 void *bitmap = mvi->tags;
42 clear_bit(tag, bitmap); 42 clear_bit(tag, bitmap);
43} 43}
44 44
@@ -49,14 +49,14 @@ void mvs_tag_free(struct mvs_info *mvi, u32 tag)
49 49
50void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) 50void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
51{ 51{
52 void *bitmap = &mvi->tags; 52 void *bitmap = mvi->tags;
53 set_bit(tag, bitmap); 53 set_bit(tag, bitmap);
54} 54}
55 55
56inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) 56inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
57{ 57{
58 unsigned int index, tag; 58 unsigned int index, tag;
59 void *bitmap = &mvi->tags; 59 void *bitmap = mvi->tags;
60 60
61 index = find_first_zero_bit(bitmap, mvi->tags_num); 61 index = find_first_zero_bit(bitmap, mvi->tags_num);
62 tag = index; 62 tag = index;
@@ -74,126 +74,6 @@ void mvs_tag_init(struct mvs_info *mvi)
74 mvs_tag_clear(mvi, i); 74 mvs_tag_clear(mvi, i);
75} 75}
76 76
77void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
78{
79 u32 i;
80 u32 run;
81 u32 offset;
82
83 offset = 0;
84 while (size) {
85 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
86 if (size >= 16)
87 run = 16;
88 else
89 run = size;
90 size -= run;
91 for (i = 0; i < 16; i++) {
92 if (i < run)
93 printk(KERN_DEBUG"%02X ", (u32)data[i]);
94 else
95 printk(KERN_DEBUG" ");
96 }
97 printk(KERN_DEBUG": ");
98 for (i = 0; i < run; i++)
99 printk(KERN_DEBUG"%c",
100 isalnum(data[i]) ? data[i] : '.');
101 printk(KERN_DEBUG"\n");
102 data = &data[16];
103 offset += run;
104 }
105 printk(KERN_DEBUG"\n");
106}
107
108#if (_MV_DUMP > 1)
109static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
110 enum sas_protocol proto)
111{
112 u32 offset;
113 struct mvs_slot_info *slot = &mvi->slot_info[tag];
114
115 offset = slot->cmd_size + MVS_OAF_SZ +
116 MVS_CHIP_DISP->prd_size() * slot->n_elem;
117 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
118 tag);
119 mvs_hexdump(32, (u8 *) slot->response,
120 (u32) slot->buf_dma + offset);
121}
122#endif
123
124static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
125 enum sas_protocol proto)
126{
127#if (_MV_DUMP > 1)
128 u32 sz, w_ptr;
129 u64 addr;
130 struct mvs_slot_info *slot = &mvi->slot_info[tag];
131
132 /*Delivery Queue */
133 sz = MVS_CHIP_SLOT_SZ;
134 w_ptr = slot->tx;
135 addr = mvi->tx_dma;
136 dev_printk(KERN_DEBUG, mvi->dev,
137 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
138 dev_printk(KERN_DEBUG, mvi->dev,
139 "Delivery Queue Base Address=0x%llX (PA)"
140 "(tx_dma=0x%llX), Entry=%04d\n",
141 addr, (unsigned long long)mvi->tx_dma, w_ptr);
142 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
143 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
144 /*Command List */
145 addr = mvi->slot_dma;
146 dev_printk(KERN_DEBUG, mvi->dev,
147 "Command List Base Address=0x%llX (PA)"
148 "(slot_dma=0x%llX), Header=%03d\n",
149 addr, (unsigned long long)slot->buf_dma, tag);
150 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
151 /*mvs_cmd_hdr */
152 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
153 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
154 /*1.command table area */
155 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
156 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
157 /*2.open address frame area */
158 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
159 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
160 (u32) slot->buf_dma + slot->cmd_size);
161 /*3.status buffer */
162 mvs_hba_sb_dump(mvi, tag, proto);
163 /*4.PRD table */
164 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
165 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
166 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
167 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
168#endif
169}
170
171static void mvs_hba_cq_dump(struct mvs_info *mvi)
172{
173#if (_MV_DUMP > 2)
174 u64 addr;
175 void __iomem *regs = mvi->regs;
176 u32 entry = mvi->rx_cons + 1;
177 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
178
179 /*Completion Queue */
180 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
181 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
182 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
183 dev_printk(KERN_DEBUG, mvi->dev,
184 "Completion List Base Address=0x%llX (PA), "
185 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
186 addr, entry - 1, mvi->rx[0]);
187 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
188 mvi->rx_dma + sizeof(u32) * entry);
189#endif
190}
191
192void mvs_get_sas_addr(void *buf, u32 buflen)
193{
194 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
195}
196
197struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) 77struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
198{ 78{
199 unsigned long i = 0, j = 0, hi = 0; 79 unsigned long i = 0, j = 0, hi = 0;
@@ -222,7 +102,6 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
222 102
223} 103}
224 104
225/* FIXME */
226int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) 105int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
227{ 106{
228 unsigned long i = 0, j = 0, n = 0, num = 0; 107 unsigned long i = 0, j = 0, n = 0, num = 0;
@@ -253,6 +132,20 @@ int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
253 return num; 132 return num;
254} 133}
255 134
135struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
136 u8 reg_set)
137{
138 u32 dev_no;
139 for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
140 if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
141 continue;
142
143 if (mvi->devices[dev_no].taskfileset == reg_set)
144 return &mvi->devices[dev_no];
145 }
146 return NULL;
147}
148
256static inline void mvs_free_reg_set(struct mvs_info *mvi, 149static inline void mvs_free_reg_set(struct mvs_info *mvi,
257 struct mvs_device *dev) 150 struct mvs_device *dev)
258{ 151{
@@ -283,7 +176,6 @@ void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
283 } 176 }
284} 177}
285 178
286/* FIXME: locking? */
287int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 179int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
288 void *funcdata) 180 void *funcdata)
289{ 181{
@@ -309,12 +201,12 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
309 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); 201 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
310 if (tmp & PHY_RST_HARD) 202 if (tmp & PHY_RST_HARD)
311 break; 203 break;
312 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); 204 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
313 break; 205 break;
314 206
315 case PHY_FUNC_LINK_RESET: 207 case PHY_FUNC_LINK_RESET:
316 MVS_CHIP_DISP->phy_enable(mvi, phy_id); 208 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
317 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); 209 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
318 break; 210 break;
319 211
320 case PHY_FUNC_DISABLE: 212 case PHY_FUNC_DISABLE:
@@ -406,14 +298,10 @@ int mvs_slave_configure(struct scsi_device *sdev)
406 298
407 if (ret) 299 if (ret)
408 return ret; 300 return ret;
409 if (dev_is_sata(dev)) { 301 if (!dev_is_sata(dev)) {
410 /* may set PIO mode */ 302 sas_change_queue_depth(sdev,
411 #if MV_DISABLE_NCQ 303 MVS_QUEUE_SIZE,
412 struct ata_port *ap = dev->sata_dev.ap; 304 SCSI_QDEPTH_DEFAULT);
413 struct ata_device *adev = ap->link.device;
414 adev->flags |= ATA_DFLAG_NCQ_OFF;
415 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
416 #endif
417 } 305 }
418 return 0; 306 return 0;
419} 307}
@@ -424,6 +312,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
424 unsigned short core_nr; 312 unsigned short core_nr;
425 struct mvs_info *mvi; 313 struct mvs_info *mvi;
426 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 314 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
315 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
427 316
428 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 317 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
429 318
@@ -432,15 +321,17 @@ void mvs_scan_start(struct Scsi_Host *shost)
432 for (i = 0; i < mvi->chip->n_phy; ++i) 321 for (i = 0; i < mvi->chip->n_phy; ++i)
433 mvs_bytes_dmaed(mvi, i); 322 mvs_bytes_dmaed(mvi, i);
434 } 323 }
324 mvs_prv->scan_finished = 1;
435} 325}
436 326
437int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) 327int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
438{ 328{
439 /* give the phy enabling interrupt event time to come in (1s 329 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
440 * is empirically about all it takes) */ 330 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
441 if (time < HZ) 331
332 if (mvs_prv->scan_finished == 0)
442 return 0; 333 return 0;
443 /* Wait for discovery to finish */ 334
444 scsi_flush_work(shost); 335 scsi_flush_work(shost);
445 return 1; 336 return 1;
446} 337}
@@ -461,10 +352,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
461 void *buf_prd; 352 void *buf_prd;
462 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 353 struct mvs_slot_info *slot = &mvi->slot_info[tag];
463 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 354 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
464#if _MV_DUMP 355
465 u8 *buf_cmd;
466 void *from;
467#endif
468 /* 356 /*
469 * DMA-map SMP request, response buffers 357 * DMA-map SMP request, response buffers
470 */ 358 */
@@ -496,15 +384,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
496 buf_tmp = slot->buf; 384 buf_tmp = slot->buf;
497 buf_tmp_dma = slot->buf_dma; 385 buf_tmp_dma = slot->buf_dma;
498 386
499#if _MV_DUMP
500 buf_cmd = buf_tmp;
501 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
502 buf_tmp += req_len;
503 buf_tmp_dma += req_len;
504 slot->cmd_size = req_len;
505#else
506 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); 387 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
507#endif
508 388
509 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 389 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
510 buf_oaf = buf_tmp; 390 buf_oaf = buf_tmp;
@@ -553,12 +433,6 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
553 /* fill in PRD (scatter/gather) table, if any */ 433 /* fill in PRD (scatter/gather) table, if any */
554 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 434 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
555 435
556#if _MV_DUMP
557 /* copy cmd table */
558 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
559 memcpy(buf_cmd, from + sg_req->offset, req_len);
560 kunmap_atomic(from, KM_IRQ0);
561#endif
562 return 0; 436 return 0;
563 437
564err_out_2: 438err_out_2:
@@ -616,14 +490,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
616 (mvi_dev->taskfileset << TXQ_SRS_SHIFT); 490 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
617 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); 491 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
618 492
619#ifndef DISABLE_HOTPLUG_DMA_FIX
620 if (task->data_dir == DMA_FROM_DEVICE) 493 if (task->data_dir == DMA_FROM_DEVICE)
621 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); 494 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
622 else 495 else
623 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 496 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
624#else 497
625 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
626#endif
627 if (task->ata_task.use_ncq) 498 if (task->ata_task.use_ncq)
628 flags |= MCH_FPDMA; 499 flags |= MCH_FPDMA;
629 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { 500 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
@@ -631,11 +502,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
631 flags |= MCH_ATAPI; 502 flags |= MCH_ATAPI;
632 } 503 }
633 504
634 /* FIXME: fill in port multiplier number */
635
636 hdr->flags = cpu_to_le32(flags); 505 hdr->flags = cpu_to_le32(flags);
637 506
638 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
639 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) 507 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
640 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 508 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
641 else 509 else
@@ -657,9 +525,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
657 525
658 buf_tmp += MVS_ATA_CMD_SZ; 526 buf_tmp += MVS_ATA_CMD_SZ;
659 buf_tmp_dma += MVS_ATA_CMD_SZ; 527 buf_tmp_dma += MVS_ATA_CMD_SZ;
660#if _MV_DUMP
661 slot->cmd_size = MVS_ATA_CMD_SZ;
662#endif
663 528
664 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 529 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
665 /* used for STP. unused for SATA? */ 530 /* used for STP. unused for SATA? */
@@ -682,9 +547,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
682 buf_tmp_dma += i; 547 buf_tmp_dma += i;
683 548
684 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ 549 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
685 /* FIXME: probably unused, for SATA. kept here just in case
686 * we get a STP/SATA error information record
687 */
688 slot->response = buf_tmp; 550 slot->response = buf_tmp;
689 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 551 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
690 if (mvi->flags & MVF_FLAG_SOC) 552 if (mvi->flags & MVF_FLAG_SOC)
@@ -715,11 +577,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
715 577
716 /* fill in PRD (scatter/gather) table, if any */ 578 /* fill in PRD (scatter/gather) table, if any */
717 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 579 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
718#ifndef DISABLE_HOTPLUG_DMA_FIX 580
719 if (task->data_dir == DMA_FROM_DEVICE) 581 if (task->data_dir == DMA_FROM_DEVICE)
720 MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, 582 MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
721 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); 583 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
722#endif 584
723 return 0; 585 return 0;
724} 586}
725 587
@@ -761,6 +623,9 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
761 } 623 }
762 if (is_tmf) 624 if (is_tmf)
763 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); 625 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
626 else
627 flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
628
764 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); 629 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
765 hdr->tags = cpu_to_le32(tag); 630 hdr->tags = cpu_to_le32(tag);
766 hdr->data_len = cpu_to_le32(task->total_xfer_len); 631 hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -777,9 +642,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
777 642
778 buf_tmp += MVS_SSP_CMD_SZ; 643 buf_tmp += MVS_SSP_CMD_SZ;
779 buf_tmp_dma += MVS_SSP_CMD_SZ; 644 buf_tmp_dma += MVS_SSP_CMD_SZ;
780#if _MV_DUMP
781 slot->cmd_size = MVS_SSP_CMD_SZ;
782#endif
783 645
784 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 646 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
785 buf_oaf = buf_tmp; 647 buf_oaf = buf_tmp;
@@ -986,7 +848,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
986 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 848 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
987 spin_unlock(&task->task_state_lock); 849 spin_unlock(&task->task_state_lock);
988 850
989 mvs_hba_memory_dump(mvi, tag, task->task_proto);
990 mvi_dev->running_req++; 851 mvi_dev->running_req++;
991 ++(*pass); 852 ++(*pass);
992 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 853 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
@@ -1189,9 +1050,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1189 mvs_slot_free(mvi, slot_idx); 1050 mvs_slot_free(mvi, slot_idx);
1190} 1051}
1191 1052
1192static void mvs_update_wideport(struct mvs_info *mvi, int i) 1053static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
1193{ 1054{
1194 struct mvs_phy *phy = &mvi->phy[i]; 1055 struct mvs_phy *phy = &mvi->phy[phy_no];
1195 struct mvs_port *port = phy->port; 1056 struct mvs_port *port = phy->port;
1196 int j, no; 1057 int j, no;
1197 1058
@@ -1246,18 +1107,17 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1246 return NULL; 1107 return NULL;
1247 1108
1248 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); 1109 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1249 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1110 s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1250 1111
1251 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); 1112 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1252 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1113 s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1253 1114
1254 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); 1115 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1255 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1116 s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1256 1117
1257 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); 1118 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1258 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1119 s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1259 1120
1260 /* Workaround: take some ATAPI devices for ATA */
1261 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) 1121 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1262 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); 1122 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1263 1123
@@ -1269,6 +1129,13 @@ static u32 mvs_is_sig_fis_received(u32 irq_status)
1269 return irq_status & PHYEV_SIG_FIS; 1129 return irq_status & PHYEV_SIG_FIS;
1270} 1130}
1271 1131
1132static void mvs_sig_remove_timer(struct mvs_phy *phy)
1133{
1134 if (phy->timer.function)
1135 del_timer(&phy->timer);
1136 phy->timer.function = NULL;
1137}
1138
1272void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) 1139void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1273{ 1140{
1274 struct mvs_phy *phy = &mvi->phy[i]; 1141 struct mvs_phy *phy = &mvi->phy[i];
@@ -1291,6 +1158,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1291 if (phy->phy_type & PORT_TYPE_SATA) { 1158 if (phy->phy_type & PORT_TYPE_SATA) {
1292 phy->identify.target_port_protocols = SAS_PROTOCOL_STP; 1159 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1293 if (mvs_is_sig_fis_received(phy->irq_status)) { 1160 if (mvs_is_sig_fis_received(phy->irq_status)) {
1161 mvs_sig_remove_timer(phy);
1294 phy->phy_attached = 1; 1162 phy->phy_attached = 1;
1295 phy->att_dev_sas_addr = 1163 phy->att_dev_sas_addr =
1296 i + mvi->id * mvi->chip->n_phy; 1164 i + mvi->id * mvi->chip->n_phy;
@@ -1308,7 +1176,6 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1308 tmp | PHYEV_SIG_FIS); 1176 tmp | PHYEV_SIG_FIS);
1309 phy->phy_attached = 0; 1177 phy->phy_attached = 0;
1310 phy->phy_type &= ~PORT_TYPE_SATA; 1178 phy->phy_type &= ~PORT_TYPE_SATA;
1311 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1312 goto out_done; 1179 goto out_done;
1313 } 1180 }
1314 } else if (phy->phy_type & PORT_TYPE_SAS 1181 } else if (phy->phy_type & PORT_TYPE_SAS
@@ -1334,9 +1201,9 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1334 if (MVS_CHIP_DISP->phy_work_around) 1201 if (MVS_CHIP_DISP->phy_work_around)
1335 MVS_CHIP_DISP->phy_work_around(mvi, i); 1202 MVS_CHIP_DISP->phy_work_around(mvi, i);
1336 } 1203 }
1337 mv_dprintk("port %d attach dev info is %x\n", 1204 mv_dprintk("phy %d attach dev info is %x\n",
1338 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); 1205 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1339 mv_dprintk("port %d attach sas addr is %llx\n", 1206 mv_dprintk("phy %d attach sas addr is %llx\n",
1340 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); 1207 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1341out_done: 1208out_done:
1342 if (get_st) 1209 if (get_st)
@@ -1361,10 +1228,10 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1361 } 1228 }
1362 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; 1229 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1363 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; 1230 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1364 if (sas_port->id >= mvi->chip->n_phy) 1231 if (i >= mvi->chip->n_phy)
1365 port = &mvi->port[sas_port->id - mvi->chip->n_phy]; 1232 port = &mvi->port[i - mvi->chip->n_phy];
1366 else 1233 else
1367 port = &mvi->port[sas_port->id]; 1234 port = &mvi->port[i];
1368 if (lock) 1235 if (lock)
1369 spin_lock_irqsave(&mvi->lock, flags); 1236 spin_lock_irqsave(&mvi->lock, flags);
1370 port->port_attached = 1; 1237 port->port_attached = 1;
@@ -1393,7 +1260,7 @@ static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1393 return; 1260 return;
1394 } 1261 }
1395 list_for_each_entry(dev, &port->dev_list, dev_list_node) 1262 list_for_each_entry(dev, &port->dev_list, dev_list_node)
1396 mvs_do_release_task(phy->mvi, phy_no, NULL); 1263 mvs_do_release_task(phy->mvi, phy_no, dev);
1397 1264
1398} 1265}
1399 1266
@@ -1457,6 +1324,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
1457 mvi_device->dev_status = MVS_DEV_NORMAL; 1324 mvi_device->dev_status = MVS_DEV_NORMAL;
1458 mvi_device->dev_type = dev->dev_type; 1325 mvi_device->dev_type = dev->dev_type;
1459 mvi_device->mvi_info = mvi; 1326 mvi_device->mvi_info = mvi;
1327 mvi_device->sas_device = dev;
1460 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 1328 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1461 int phy_id; 1329 int phy_id;
1462 u8 phy_num = parent_dev->ex_dev.num_phys; 1330 u8 phy_num = parent_dev->ex_dev.num_phys;
@@ -1508,6 +1376,7 @@ void mvs_dev_gone_notify(struct domain_device *dev)
1508 mv_dprintk("found dev has gone.\n"); 1376 mv_dprintk("found dev has gone.\n");
1509 } 1377 }
1510 dev->lldd_dev = NULL; 1378 dev->lldd_dev = NULL;
1379 mvi_dev->sas_device = NULL;
1511 1380
1512 spin_unlock_irqrestore(&mvi->lock, flags); 1381 spin_unlock_irqrestore(&mvi->lock, flags);
1513} 1382}
@@ -1555,7 +1424,6 @@ static void mvs_tmf_timedout(unsigned long data)
1555 complete(&task->completion); 1424 complete(&task->completion);
1556} 1425}
1557 1426
1558/* XXX */
1559#define MVS_TASK_TIMEOUT 20 1427#define MVS_TASK_TIMEOUT 20
1560static int mvs_exec_internal_tmf_task(struct domain_device *dev, 1428static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1561 void *parameter, u32 para_len, struct mvs_tmf_task *tmf) 1429 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
@@ -1588,7 +1456,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1588 } 1456 }
1589 1457
1590 wait_for_completion(&task->completion); 1458 wait_for_completion(&task->completion);
1591 res = -TMF_RESP_FUNC_FAILED; 1459 res = TMF_RESP_FUNC_FAILED;
1592 /* Even TMF timed out, return direct. */ 1460 /* Even TMF timed out, return direct. */
1593 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1461 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1594 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1462 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -1638,11 +1506,10 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1638 u8 *lun, struct mvs_tmf_task *tmf) 1506 u8 *lun, struct mvs_tmf_task *tmf)
1639{ 1507{
1640 struct sas_ssp_task ssp_task; 1508 struct sas_ssp_task ssp_task;
1641 DECLARE_COMPLETION_ONSTACK(completion);
1642 if (!(dev->tproto & SAS_PROTOCOL_SSP)) 1509 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1643 return TMF_RESP_FUNC_ESUPP; 1510 return TMF_RESP_FUNC_ESUPP;
1644 1511
1645 strncpy((u8 *)&ssp_task.LUN, lun, 8); 1512 memcpy(ssp_task.LUN, lun, 8);
1646 1513
1647 return mvs_exec_internal_tmf_task(dev, &ssp_task, 1514 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1648 sizeof(ssp_task), tmf); 1515 sizeof(ssp_task), tmf);
@@ -1666,7 +1533,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1666int mvs_lu_reset(struct domain_device *dev, u8 *lun) 1533int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1667{ 1534{
1668 unsigned long flags; 1535 unsigned long flags;
1669 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; 1536 int rc = TMF_RESP_FUNC_FAILED;
1670 struct mvs_tmf_task tmf_task; 1537 struct mvs_tmf_task tmf_task;
1671 struct mvs_device * mvi_dev = dev->lldd_dev; 1538 struct mvs_device * mvi_dev = dev->lldd_dev;
1672 struct mvs_info *mvi = mvi_dev->mvi_info; 1539 struct mvs_info *mvi = mvi_dev->mvi_info;
@@ -1675,10 +1542,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1675 mvi_dev->dev_status = MVS_DEV_EH; 1542 mvi_dev->dev_status = MVS_DEV_EH;
1676 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); 1543 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1677 if (rc == TMF_RESP_FUNC_COMPLETE) { 1544 if (rc == TMF_RESP_FUNC_COMPLETE) {
1678 num = mvs_find_dev_phyno(dev, phyno);
1679 spin_lock_irqsave(&mvi->lock, flags); 1545 spin_lock_irqsave(&mvi->lock, flags);
1680 for (i = 0; i < num; i++) 1546 mvs_release_task(mvi, dev);
1681 mvs_release_task(mvi, dev);
1682 spin_unlock_irqrestore(&mvi->lock, flags); 1547 spin_unlock_irqrestore(&mvi->lock, flags);
1683 } 1548 }
1684 /* If failed, fall-through I_T_Nexus reset */ 1549 /* If failed, fall-through I_T_Nexus reset */
@@ -1696,11 +1561,12 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
1696 1561
1697 if (mvi_dev->dev_status != MVS_DEV_EH) 1562 if (mvi_dev->dev_status != MVS_DEV_EH)
1698 return TMF_RESP_FUNC_COMPLETE; 1563 return TMF_RESP_FUNC_COMPLETE;
1564 else
1565 mvi_dev->dev_status = MVS_DEV_NORMAL;
1699 rc = mvs_debug_I_T_nexus_reset(dev); 1566 rc = mvs_debug_I_T_nexus_reset(dev);
1700 mv_printk("%s for device[%x]:rc= %d\n", 1567 mv_printk("%s for device[%x]:rc= %d\n",
1701 __func__, mvi_dev->device_id, rc); 1568 __func__, mvi_dev->device_id, rc);
1702 1569
1703 /* housekeeper */
1704 spin_lock_irqsave(&mvi->lock, flags); 1570 spin_lock_irqsave(&mvi->lock, flags);
1705 mvs_release_task(mvi, dev); 1571 mvs_release_task(mvi, dev);
1706 spin_unlock_irqrestore(&mvi->lock, flags); 1572 spin_unlock_irqrestore(&mvi->lock, flags);
@@ -1739,9 +1605,6 @@ int mvs_query_task(struct sas_task *task)
1739 case TMF_RESP_FUNC_FAILED: 1605 case TMF_RESP_FUNC_FAILED:
1740 case TMF_RESP_FUNC_COMPLETE: 1606 case TMF_RESP_FUNC_COMPLETE:
1741 break; 1607 break;
1742 default:
1743 rc = TMF_RESP_FUNC_COMPLETE;
1744 break;
1745 } 1608 }
1746 } 1609 }
1747 mv_printk("%s:rc= %d\n", __func__, rc); 1610 mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1761,8 +1624,8 @@ int mvs_abort_task(struct sas_task *task)
1761 u32 tag; 1624 u32 tag;
1762 1625
1763 if (!mvi_dev) { 1626 if (!mvi_dev) {
1764 mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__); 1627 mv_printk("Device has removed\n");
1765 rc = TMF_RESP_FUNC_FAILED; 1628 return TMF_RESP_FUNC_FAILED;
1766 } 1629 }
1767 1630
1768 mvi = mvi_dev->mvi_info; 1631 mvi = mvi_dev->mvi_info;
@@ -1807,25 +1670,17 @@ int mvs_abort_task(struct sas_task *task)
1807 1670
1808 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1671 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1809 task->task_proto & SAS_PROTOCOL_STP) { 1672 task->task_proto & SAS_PROTOCOL_STP) {
1810 /* to do free register_set */
1811 if (SATA_DEV == dev->dev_type) { 1673 if (SATA_DEV == dev->dev_type) {
1812 struct mvs_slot_info *slot = task->lldd_task; 1674 struct mvs_slot_info *slot = task->lldd_task;
1813 struct task_status_struct *tstat;
1814 u32 slot_idx = (u32)(slot - mvi->slot_info); 1675 u32 slot_idx = (u32)(slot - mvi->slot_info);
1815 tstat = &task->task_status; 1676 mv_dprintk("mvs_abort_task() mvi=%p task=%p "
1816 mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
1817 "slot=%p slot_idx=x%x\n", 1677 "slot=%p slot_idx=x%x\n",
1818 mvi, task, slot, slot_idx); 1678 mvi, task, slot, slot_idx);
1819 tstat->stat = SAS_ABORTED_TASK; 1679 mvs_tmf_timedout((unsigned long)task);
1820 if (mvi_dev && mvi_dev->running_req)
1821 mvi_dev->running_req--;
1822 if (sas_protocol_ata(task->task_proto))
1823 mvs_free_reg_set(mvi, mvi_dev);
1824 mvs_slot_task_free(mvi, task, slot, slot_idx); 1680 mvs_slot_task_free(mvi, task, slot, slot_idx);
1825 return -1; 1681 rc = TMF_RESP_FUNC_COMPLETE;
1682 goto out;
1826 } 1683 }
1827 } else {
1828 /* SMP */
1829 1684
1830 } 1685 }
1831out: 1686out:
@@ -1891,12 +1746,63 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1891 return stat; 1746 return stat;
1892} 1747}
1893 1748
1749void mvs_set_sense(u8 *buffer, int len, int d_sense,
1750 int key, int asc, int ascq)
1751{
1752 memset(buffer, 0, len);
1753
1754 if (d_sense) {
1755 /* Descriptor format */
1756 if (len < 4) {
1757 mv_printk("Length %d of sense buffer too small to "
1758 "fit sense %x:%x:%x", len, key, asc, ascq);
1759 }
1760
1761 buffer[0] = 0x72; /* Response Code */
1762 if (len > 1)
1763 buffer[1] = key; /* Sense Key */
1764 if (len > 2)
1765 buffer[2] = asc; /* ASC */
1766 if (len > 3)
1767 buffer[3] = ascq; /* ASCQ */
1768 } else {
1769 if (len < 14) {
1770 mv_printk("Length %d of sense buffer too small to "
1771 "fit sense %x:%x:%x", len, key, asc, ascq);
1772 }
1773
1774 buffer[0] = 0x70; /* Response Code */
1775 if (len > 2)
1776 buffer[2] = key; /* Sense Key */
1777 if (len > 7)
1778 buffer[7] = 0x0a; /* Additional Sense Length */
1779 if (len > 12)
1780 buffer[12] = asc; /* ASC */
1781 if (len > 13)
1782 buffer[13] = ascq; /* ASCQ */
1783 }
1784
1785 return;
1786}
1787
1788void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
1789 u8 key, u8 asc, u8 asc_q)
1790{
1791 iu->datapres = 2;
1792 iu->response_data_len = 0;
1793 iu->sense_data_len = 17;
1794 iu->status = 02;
1795 mvs_set_sense(iu->sense_data, 17, 0,
1796 key, asc, asc_q);
1797}
1798
1894static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, 1799static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1895 u32 slot_idx) 1800 u32 slot_idx)
1896{ 1801{
1897 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 1802 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1898 int stat; 1803 int stat;
1899 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); 1804 u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
1805 u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
1900 u32 tfs = 0; 1806 u32 tfs = 0;
1901 enum mvs_port_type type = PORT_TYPE_SAS; 1807 enum mvs_port_type type = PORT_TYPE_SAS;
1902 1808
@@ -1908,8 +1814,19 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1908 stat = SAM_STAT_CHECK_CONDITION; 1814 stat = SAM_STAT_CHECK_CONDITION;
1909 switch (task->task_proto) { 1815 switch (task->task_proto) {
1910 case SAS_PROTOCOL_SSP: 1816 case SAS_PROTOCOL_SSP:
1817 {
1911 stat = SAS_ABORTED_TASK; 1818 stat = SAS_ABORTED_TASK;
1819 if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
1820 struct ssp_response_iu *iu = slot->response +
1821 sizeof(struct mvs_err_info);
1822 mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
1823 sas_ssp_task_response(mvi->dev, task, iu);
1824 stat = SAM_STAT_CHECK_CONDITION;
1825 }
1826 if (err_dw1 & bit(31))
1827 mv_printk("reuse same slot, retry command.\n");
1912 break; 1828 break;
1829 }
1913 case SAS_PROTOCOL_SMP: 1830 case SAS_PROTOCOL_SMP:
1914 stat = SAM_STAT_CHECK_CONDITION; 1831 stat = SAM_STAT_CHECK_CONDITION;
1915 break; 1832 break;
@@ -1918,10 +1835,8 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1918 case SAS_PROTOCOL_STP: 1835 case SAS_PROTOCOL_STP:
1919 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 1836 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1920 { 1837 {
1921 if (err_dw0 == 0x80400002)
1922 mv_printk("find reserved error, why?\n");
1923
1924 task->ata_task.use_ncq = 0; 1838 task->ata_task.use_ncq = 0;
1839 stat = SAS_PROTO_RESPONSE;
1925 mvs_sata_done(mvi, task, slot_idx, err_dw0); 1840 mvs_sata_done(mvi, task, slot_idx, err_dw0);
1926 } 1841 }
1927 break; 1842 break;
@@ -1945,8 +1860,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1945 void *to; 1860 void *to;
1946 enum exec_status sts; 1861 enum exec_status sts;
1947 1862
1948 if (mvi->exp_req)
1949 mvi->exp_req--;
1950 if (unlikely(!task || !task->lldd_task || !task->dev)) 1863 if (unlikely(!task || !task->lldd_task || !task->dev))
1951 return -1; 1864 return -1;
1952 1865
@@ -1954,8 +1867,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1954 dev = task->dev; 1867 dev = task->dev;
1955 mvi_dev = dev->lldd_dev; 1868 mvi_dev = dev->lldd_dev;
1956 1869
1957 mvs_hba_cq_dump(mvi);
1958
1959 spin_lock(&task->task_state_lock); 1870 spin_lock(&task->task_state_lock);
1960 task->task_state_flags &= 1871 task->task_state_flags &=
1961 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1872 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
@@ -1978,6 +1889,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1978 return -1; 1889 return -1;
1979 } 1890 }
1980 1891
1892 /* when no device attaching, go ahead and complete by error handling*/
1981 if (unlikely(!mvi_dev || flags)) { 1893 if (unlikely(!mvi_dev || flags)) {
1982 if (!mvi_dev) 1894 if (!mvi_dev)
1983 mv_dprintk("port has not device.\n"); 1895 mv_dprintk("port has not device.\n");
@@ -1987,6 +1899,9 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1987 1899
1988 /* error info record present */ 1900 /* error info record present */
1989 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 1901 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1902 mv_dprintk("port %d slot %d rx_desc %X has error info"
1903 "%016llX.\n", slot->port->sas_port.id, slot_idx,
1904 rx_desc, (u64)(*(u64 *)slot->response));
1990 tstat->stat = mvs_slot_err(mvi, task, slot_idx); 1905 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1991 tstat->resp = SAS_TASK_COMPLETE; 1906 tstat->resp = SAS_TASK_COMPLETE;
1992 goto out; 1907 goto out;
@@ -2048,8 +1963,7 @@ out:
2048 spin_unlock(&mvi->lock); 1963 spin_unlock(&mvi->lock);
2049 if (task->task_done) 1964 if (task->task_done)
2050 task->task_done(task); 1965 task->task_done(task);
2051 else 1966
2052 mv_dprintk("why has not task_done.\n");
2053 spin_lock(&mvi->lock); 1967 spin_lock(&mvi->lock);
2054 1968
2055 return sts; 1969 return sts;
@@ -2092,7 +2006,6 @@ void mvs_release_task(struct mvs_info *mvi,
2092 struct domain_device *dev) 2006 struct domain_device *dev)
2093{ 2007{
2094 int i, phyno[WIDE_PORT_MAX_PHY], num; 2008 int i, phyno[WIDE_PORT_MAX_PHY], num;
2095 /* housekeeper */
2096 num = mvs_find_dev_phyno(dev, phyno); 2009 num = mvs_find_dev_phyno(dev, phyno);
2097 for (i = 0; i < num; i++) 2010 for (i = 0; i < num; i++)
2098 mvs_do_release_task(mvi, phyno[i], dev); 2011 mvs_do_release_task(mvi, phyno[i], dev);
@@ -2111,13 +2024,13 @@ static void mvs_work_queue(struct work_struct *work)
2111 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); 2024 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
2112 struct mvs_info *mvi = mwq->mvi; 2025 struct mvs_info *mvi = mwq->mvi;
2113 unsigned long flags; 2026 unsigned long flags;
2027 u32 phy_no = (unsigned long) mwq->data;
2028 struct sas_ha_struct *sas_ha = mvi->sas;
2029 struct mvs_phy *phy = &mvi->phy[phy_no];
2030 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2114 2031
2115 spin_lock_irqsave(&mvi->lock, flags); 2032 spin_lock_irqsave(&mvi->lock, flags);
2116 if (mwq->handler & PHY_PLUG_EVENT) { 2033 if (mwq->handler & PHY_PLUG_EVENT) {
2117 u32 phy_no = (unsigned long) mwq->data;
2118 struct sas_ha_struct *sas_ha = mvi->sas;
2119 struct mvs_phy *phy = &mvi->phy[phy_no];
2120 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2121 2034
2122 if (phy->phy_event & PHY_PLUG_OUT) { 2035 if (phy->phy_event & PHY_PLUG_OUT) {
2123 u32 tmp; 2036 u32 tmp;
@@ -2139,6 +2052,11 @@ static void mvs_work_queue(struct work_struct *work)
2139 mv_dprintk("phy%d Attached Device\n", phy_no); 2052 mv_dprintk("phy%d Attached Device\n", phy_no);
2140 } 2053 }
2141 } 2054 }
2055 } else if (mwq->handler & EXP_BRCT_CHG) {
2056 phy->phy_event &= ~EXP_BRCT_CHG;
2057 sas_ha->notify_port_event(sas_phy,
2058 PORTE_BROADCAST_RCVD);
2059 mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
2142 } 2060 }
2143 list_del(&mwq->entry); 2061 list_del(&mwq->entry);
2144 spin_unlock_irqrestore(&mvi->lock, flags); 2062 spin_unlock_irqrestore(&mvi->lock, flags);
@@ -2174,29 +2092,21 @@ static void mvs_sig_time_out(unsigned long tphy)
2174 if (&mvi->phy[phy_no] == phy) { 2092 if (&mvi->phy[phy_no] == phy) {
2175 mv_dprintk("Get signature time out, reset phy %d\n", 2093 mv_dprintk("Get signature time out, reset phy %d\n",
2176 phy_no+mvi->id*mvi->chip->n_phy); 2094 phy_no+mvi->id*mvi->chip->n_phy);
2177 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); 2095 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
2178 } 2096 }
2179 } 2097 }
2180} 2098}
2181 2099
2182static void mvs_sig_remove_timer(struct mvs_phy *phy)
2183{
2184 if (phy->timer.function)
2185 del_timer(&phy->timer);
2186 phy->timer.function = NULL;
2187}
2188
2189void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) 2100void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2190{ 2101{
2191 u32 tmp; 2102 u32 tmp;
2192 struct sas_ha_struct *sas_ha = mvi->sas;
2193 struct mvs_phy *phy = &mvi->phy[phy_no]; 2103 struct mvs_phy *phy = &mvi->phy[phy_no];
2194 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2195 2104
2196 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); 2105 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2197 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, 2106 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2107 mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2198 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); 2108 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2199 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, 2109 mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
2200 phy->irq_status); 2110 phy->irq_status);
2201 2111
2202 /* 2112 /*
@@ -2205,11 +2115,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2205 */ 2115 */
2206 2116
2207 if (phy->irq_status & PHYEV_DCDR_ERR) { 2117 if (phy->irq_status & PHYEV_DCDR_ERR) {
2208 mv_dprintk("port %d STP decoding error.\n", 2118 mv_dprintk("phy %d STP decoding error.\n",
2209 phy_no + mvi->id*mvi->chip->n_phy); 2119 phy_no + mvi->id*mvi->chip->n_phy);
2210 } 2120 }
2211 2121
2212 if (phy->irq_status & PHYEV_POOF) { 2122 if (phy->irq_status & PHYEV_POOF) {
2123 mdelay(500);
2213 if (!(phy->phy_event & PHY_PLUG_OUT)) { 2124 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2214 int dev_sata = phy->phy_type & PORT_TYPE_SATA; 2125 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2215 int ready; 2126 int ready;
@@ -2220,17 +2131,13 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2220 (void *)(unsigned long)phy_no, 2131 (void *)(unsigned long)phy_no,
2221 PHY_PLUG_EVENT); 2132 PHY_PLUG_EVENT);
2222 ready = mvs_is_phy_ready(mvi, phy_no); 2133 ready = mvs_is_phy_ready(mvi, phy_no);
2223 if (!ready)
2224 mv_dprintk("phy%d Unplug Notice\n",
2225 phy_no +
2226 mvi->id * mvi->chip->n_phy);
2227 if (ready || dev_sata) { 2134 if (ready || dev_sata) {
2228 if (MVS_CHIP_DISP->stp_reset) 2135 if (MVS_CHIP_DISP->stp_reset)
2229 MVS_CHIP_DISP->stp_reset(mvi, 2136 MVS_CHIP_DISP->stp_reset(mvi,
2230 phy_no); 2137 phy_no);
2231 else 2138 else
2232 MVS_CHIP_DISP->phy_reset(mvi, 2139 MVS_CHIP_DISP->phy_reset(mvi,
2233 phy_no, 0); 2140 phy_no, MVS_SOFT_RESET);
2234 return; 2141 return;
2235 } 2142 }
2236 } 2143 }
@@ -2243,13 +2150,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2243 if (phy->timer.function == NULL) { 2150 if (phy->timer.function == NULL) {
2244 phy->timer.data = (unsigned long)phy; 2151 phy->timer.data = (unsigned long)phy;
2245 phy->timer.function = mvs_sig_time_out; 2152 phy->timer.function = mvs_sig_time_out;
2246 phy->timer.expires = jiffies + 10*HZ; 2153 phy->timer.expires = jiffies + 5*HZ;
2247 add_timer(&phy->timer); 2154 add_timer(&phy->timer);
2248 } 2155 }
2249 } 2156 }
2250 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { 2157 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2251 phy->phy_status = mvs_is_phy_ready(mvi, phy_no); 2158 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2252 mvs_sig_remove_timer(phy);
2253 mv_dprintk("notify plug in on phy[%d]\n", phy_no); 2159 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2254 if (phy->phy_status) { 2160 if (phy->phy_status) {
2255 mdelay(10); 2161 mdelay(10);
@@ -2263,14 +2169,14 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2263 } 2169 }
2264 mvs_update_phyinfo(mvi, phy_no, 0); 2170 mvs_update_phyinfo(mvi, phy_no, 0);
2265 if (phy->phy_type & PORT_TYPE_SAS) { 2171 if (phy->phy_type & PORT_TYPE_SAS) {
2266 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2); 2172 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
2267 mdelay(10); 2173 mdelay(10);
2268 } 2174 }
2269 2175
2270 mvs_bytes_dmaed(mvi, phy_no); 2176 mvs_bytes_dmaed(mvi, phy_no);
2271 /* whether driver is going to handle hot plug */ 2177 /* whether driver is going to handle hot plug */
2272 if (phy->phy_event & PHY_PLUG_OUT) { 2178 if (phy->phy_event & PHY_PLUG_OUT) {
2273 mvs_port_notify_formed(sas_phy, 0); 2179 mvs_port_notify_formed(&phy->sas_phy, 0);
2274 phy->phy_event &= ~PHY_PLUG_OUT; 2180 phy->phy_event &= ~PHY_PLUG_OUT;
2275 } 2181 }
2276 } else { 2182 } else {
@@ -2278,13 +2184,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2278 phy_no + mvi->id*mvi->chip->n_phy); 2184 phy_no + mvi->id*mvi->chip->n_phy);
2279 } 2185 }
2280 } else if (phy->irq_status & PHYEV_BROAD_CH) { 2186 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2281 mv_dprintk("port %d broadcast change.\n", 2187 mv_dprintk("phy %d broadcast change.\n",
2282 phy_no + mvi->id*mvi->chip->n_phy); 2188 phy_no + mvi->id*mvi->chip->n_phy);
2283 /* exception for Samsung disk drive*/ 2189 mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
2284 mdelay(1000); 2190 EXP_BRCT_CHG);
2285 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2286 } 2191 }
2287 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2288} 2192}
2289 2193
2290int mvs_int_rx(struct mvs_info *mvi, bool self_clear) 2194int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 1367d8b9350d..44d7885a4a1d 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -48,12 +48,8 @@
48 48
49#define DRV_NAME "mvsas" 49#define DRV_NAME "mvsas"
50#define DRV_VERSION "0.8.2" 50#define DRV_VERSION "0.8.2"
51#define _MV_DUMP 0
52#define MVS_ID_NOT_MAPPED 0x7f 51#define MVS_ID_NOT_MAPPED 0x7f
53/* #define DISABLE_HOTPLUG_DMA_FIX */
54// #define MAX_EXP_RUNNING_REQ 2
55#define WIDE_PORT_MAX_PHY 4 52#define WIDE_PORT_MAX_PHY 4
56#define MV_DISABLE_NCQ 0
57#define mv_printk(fmt, arg ...) \ 53#define mv_printk(fmt, arg ...) \
58 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg) 54 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
59#ifdef MV_DEBUG 55#ifdef MV_DEBUG
@@ -64,6 +60,7 @@
64#endif 60#endif
65#define MV_MAX_U32 0xffffffff 61#define MV_MAX_U32 0xffffffff
66 62
63extern int interrupt_coalescing;
67extern struct mvs_tgt_initiator mvs_tgt; 64extern struct mvs_tgt_initiator mvs_tgt;
68extern struct mvs_info *tgt_mvi; 65extern struct mvs_info *tgt_mvi;
69extern const struct mvs_dispatch mvs_64xx_dispatch; 66extern const struct mvs_dispatch mvs_64xx_dispatch;
@@ -99,6 +96,11 @@ enum dev_status {
99 MVS_DEV_EH = 0x1, 96 MVS_DEV_EH = 0x1,
100}; 97};
101 98
99enum dev_reset {
100 MVS_SOFT_RESET = 0,
101 MVS_HARD_RESET = 1,
102 MVS_PHY_TUNE = 2,
103};
102 104
103struct mvs_info; 105struct mvs_info;
104 106
@@ -130,7 +132,6 @@ struct mvs_dispatch {
130 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port); 132 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
131 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val); 133 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
132 134
133 void (*get_sas_addr)(void *buf, u32 buflen);
134 void (*command_active)(struct mvs_info *mvi, u32 slot_idx); 135 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
135 void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all); 136 void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
136 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, 137 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
@@ -167,9 +168,10 @@ struct mvs_dispatch {
167 ); 168 );
168 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd); 169 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
169 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout); 170 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
170#ifndef DISABLE_HOTPLUG_DMA_FIX 171 void (*dma_fix)(struct mvs_info *mvi, u32 phy_mask,
171 void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd); 172 int buf_len, int from, void *prd);
172#endif 173 void (*tune_interrupt)(struct mvs_info *mvi, u32 time);
174 void (*non_spec_ncq_error)(struct mvs_info *mvi);
173 175
174}; 176};
175 177
@@ -179,9 +181,11 @@ struct mvs_chip_info {
179 u32 fis_offs; 181 u32 fis_offs;
180 u32 fis_count; 182 u32 fis_count;
181 u32 srs_sz; 183 u32 srs_sz;
184 u32 sg_width;
182 u32 slot_width; 185 u32 slot_width;
183 const struct mvs_dispatch *dispatch; 186 const struct mvs_dispatch *dispatch;
184}; 187};
188#define MVS_MAX_SG (1U << mvi->chip->sg_width)
185#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) 189#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
186#define MVS_RX_FISL_SZ \ 190#define MVS_RX_FISL_SZ \
187 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100)) 191 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
@@ -249,6 +253,73 @@ struct mvs_device {
249 u16 reserved; 253 u16 reserved;
250}; 254};
251 255
256/* Generate PHY tunning parameters */
257struct phy_tuning {
258 /* 1 bit, transmitter emphasis enable */
259 u8 trans_emp_en:1;
260 /* 4 bits, transmitter emphasis amplitude */
261 u8 trans_emp_amp:4;
262 /* 3 bits, reserved space */
263 u8 Reserved_2bit_1:3;
264 /* 5 bits, transmitter amplitude */
265 u8 trans_amp:5;
266 /* 2 bits, transmitter amplitude adjust */
267 u8 trans_amp_adj:2;
268 /* 1 bit, reserved space */
269 u8 resv_2bit_2:1;
270 /* 2 bytes, reserved space */
271 u8 reserved[2];
272};
273
274struct ffe_control {
275 /* 4 bits, FFE Capacitor Select (value range 0~F) */
276 u8 ffe_cap_sel:4;
277 /* 3 bits, FFE Resistor Select (value range 0~7) */
278 u8 ffe_rss_sel:3;
279 /* 1 bit reserve*/
280 u8 reserved:1;
281};
282
283/*
284 * HBA_Info_Page is saved in Flash/NVRAM, total 256 bytes.
285 * The data area is valid only Signature="MRVL".
286 * If any member fills with 0xFF, the member is invalid.
287 */
288struct hba_info_page {
289 /* Dword 0 */
290 /* 4 bytes, structure signature,should be "MRVL" at first initial */
291 u8 signature[4];
292
293 /* Dword 1-13 */
294 u32 reserved1[13];
295
296 /* Dword 14-29 */
297 /* 64 bytes, SAS address for each port */
298 u64 sas_addr[8];
299
300 /* Dword 30-31 */
301 /* 8 bytes for vanir 8 port PHY FFE seeting
302 * BIT 0~3 : FFE Capacitor select(value range 0~F)
303 * BIT 4~6 : FFE Resistor select(value range 0~7)
304 * BIT 7: reserve.
305 */
306
307 struct ffe_control ffe_ctl[8];
308 /* Dword 32 -43 */
309 u32 reserved2[12];
310
311 /* Dword 44-45 */
312 /* 8 bytes, 0: 1.5G, 1: 3.0G, should be 0x01 at first initial */
313 u8 phy_rate[8];
314
315 /* Dword 46-53 */
316 /* 32 bytes, PHY tuning parameters for each PHY*/
317 struct phy_tuning phy_tuning[8];
318
319 /* Dword 54-63 */
320 u32 reserved3[10];
321}; /* total 256 bytes */
322
252struct mvs_slot_info { 323struct mvs_slot_info {
253 struct list_head entry; 324 struct list_head entry;
254 union { 325 union {
@@ -264,9 +335,6 @@ struct mvs_slot_info {
264 */ 335 */
265 void *buf; 336 void *buf;
266 dma_addr_t buf_dma; 337 dma_addr_t buf_dma;
267#if _MV_DUMP
268 u32 cmd_size;
269#endif
270 void *response; 338 void *response;
271 struct mvs_port *port; 339 struct mvs_port *port;
272 struct mvs_device *device; 340 struct mvs_device *device;
@@ -320,12 +388,10 @@ struct mvs_info {
320 const struct mvs_chip_info *chip; 388 const struct mvs_chip_info *chip;
321 389
322 int tags_num; 390 int tags_num;
323 DECLARE_BITMAP(tags, MVS_SLOTS); 391 unsigned long *tags;
324 /* further per-slot information */ 392 /* further per-slot information */
325 struct mvs_phy phy[MVS_MAX_PHYS]; 393 struct mvs_phy phy[MVS_MAX_PHYS];
326 struct mvs_port port[MVS_MAX_PHYS]; 394 struct mvs_port port[MVS_MAX_PHYS];
327 u32 irq;
328 u32 exp_req;
329 u32 id; 395 u32 id;
330 u64 sata_reg_set; 396 u64 sata_reg_set;
331 struct list_head *hba_list; 397 struct list_head *hba_list;
@@ -337,12 +403,13 @@ struct mvs_info {
337 u32 flashsectSize; 403 u32 flashsectSize;
338 404
339 void *addon; 405 void *addon;
406 struct hba_info_page hba_info_param;
340 struct mvs_device devices[MVS_MAX_DEVICES]; 407 struct mvs_device devices[MVS_MAX_DEVICES];
341#ifndef DISABLE_HOTPLUG_DMA_FIX
342 void *bulk_buffer; 408 void *bulk_buffer;
343 dma_addr_t bulk_buffer_dma; 409 dma_addr_t bulk_buffer_dma;
410 void *bulk_buffer1;
411 dma_addr_t bulk_buffer_dma1;
344#define TRASH_BUCKET_SIZE 0x20000 412#define TRASH_BUCKET_SIZE 0x20000
345#endif
346 void *dma_pool; 413 void *dma_pool;
347 struct mvs_slot_info slot_info[0]; 414 struct mvs_slot_info slot_info[0];
348}; 415};
@@ -350,8 +417,10 @@ struct mvs_info {
350struct mvs_prv_info{ 417struct mvs_prv_info{
351 u8 n_host; 418 u8 n_host;
352 u8 n_phy; 419 u8 n_phy;
353 u16 reserve; 420 u8 scan_finished;
421 u8 reserve;
354 struct mvs_info *mvi[2]; 422 struct mvs_info *mvi[2];
423 struct tasklet_struct mv_tasklet;
355}; 424};
356 425
357struct mvs_wq { 426struct mvs_wq {
@@ -415,6 +484,6 @@ void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
415void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); 484void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
416void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); 485void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
417int mvs_int_rx(struct mvs_info *mvi, bool self_clear); 486int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
418void mvs_hexdump(u32 size, u8 *data, u32 baseaddr); 487struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set);
419#endif 488#endif
420 489
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index fca6a8953070..d079f9a3c6b3 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3871,6 +3871,9 @@ static long pmcraid_ioctl_passthrough(
3871 pmcraid_err("couldn't build passthrough ioadls\n"); 3871 pmcraid_err("couldn't build passthrough ioadls\n");
3872 goto out_free_buffer; 3872 goto out_free_buffer;
3873 } 3873 }
3874 } else if (request_size < 0) {
3875 rc = -EINVAL;
3876 goto out_free_buffer;
3874 } 3877 }
3875 3878
3876 /* If data is being written into the device, copy the data from user 3879 /* If data is being written into the device, copy the data from user
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 532313e0725e..7836eb01c7fc 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
42 int reading; 42 int reading;
43 43
44 if (IS_QLA82XX(ha)) { 44 if (IS_QLA82XX(ha)) {
45 DEBUG2(qla_printk(KERN_INFO, ha, 45 ql_dbg(ql_dbg_user, vha, 0x705b,
46 "Firmware dump not supported for ISP82xx\n")); 46 "Firmware dump not supported for ISP82xx\n");
47 return count; 47 return count;
48 } 48 }
49 49
@@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
56 if (!ha->fw_dump_reading) 56 if (!ha->fw_dump_reading)
57 break; 57 break;
58 58
59 qla_printk(KERN_INFO, ha, 59 ql_log(ql_log_info, vha, 0x705d,
60 "Firmware dump cleared on (%ld).\n", vha->host_no); 60 "Firmware dump cleared on (%ld).\n", vha->host_no);
61 61
62 ha->fw_dump_reading = 0; 62 ha->fw_dump_reading = 0;
@@ -66,7 +66,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
66 if (ha->fw_dumped && !ha->fw_dump_reading) { 66 if (ha->fw_dumped && !ha->fw_dump_reading) {
67 ha->fw_dump_reading = 1; 67 ha->fw_dump_reading = 1;
68 68
69 qla_printk(KERN_INFO, ha, 69 ql_log(ql_log_info, vha, 0x705e,
70 "Raw firmware dump ready for read on (%ld).\n", 70 "Raw firmware dump ready for read on (%ld).\n",
71 vha->host_no); 71 vha->host_no);
72 } 72 }
@@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
148 } 148 }
149 149
150 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 150 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
151 qla_printk(KERN_WARNING, ha, 151 ql_log(ql_log_warn, vha, 0x705f,
152 "HBA not online, failing NVRAM update.\n"); 152 "HBA not online, failing NVRAM update.\n");
153 return -EAGAIN; 153 return -EAGAIN;
154 } 154 }
@@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
158 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base, 158 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
159 count); 159 count);
160 160
161 ql_dbg(ql_dbg_user, vha, 0x7060,
162 "Setting ISP_ABORT_NEEDED\n");
161 /* NVRAM settings take effect immediately. */ 163 /* NVRAM settings take effect immediately. */
162 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 164 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
163 qla2xxx_wake_dpc(vha); 165 qla2xxx_wake_dpc(vha);
@@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
255 257
256 ha->optrom_state = QLA_SWAITING; 258 ha->optrom_state = QLA_SWAITING;
257 259
258 DEBUG2(qla_printk(KERN_INFO, ha, 260 ql_dbg(ql_dbg_user, vha, 0x7061,
259 "Freeing flash region allocation -- 0x%x bytes.\n", 261 "Freeing flash region allocation -- 0x%x bytes.\n",
260 ha->optrom_region_size)); 262 ha->optrom_region_size);
261 263
262 vfree(ha->optrom_buffer); 264 vfree(ha->optrom_buffer);
263 ha->optrom_buffer = NULL; 265 ha->optrom_buffer = NULL;
@@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
273 ha->optrom_state = QLA_SREADING; 275 ha->optrom_state = QLA_SREADING;
274 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 276 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
275 if (ha->optrom_buffer == NULL) { 277 if (ha->optrom_buffer == NULL) {
276 qla_printk(KERN_WARNING, ha, 278 ql_log(ql_log_warn, vha, 0x7062,
277 "Unable to allocate memory for optrom retrieval " 279 "Unable to allocate memory for optrom retrieval "
278 "(%x).\n", ha->optrom_region_size); 280 "(%x).\n", ha->optrom_region_size);
279 281
@@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
282 } 284 }
283 285
284 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 286 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
285 qla_printk(KERN_WARNING, ha, 287 ql_log(ql_log_warn, vha, 0x7063,
286 "HBA not online, failing NVRAM update.\n"); 288 "HBA not online, failing NVRAM update.\n");
287 return -EAGAIN; 289 return -EAGAIN;
288 } 290 }
289 291
290 DEBUG2(qla_printk(KERN_INFO, ha, 292 ql_dbg(ql_dbg_user, vha, 0x7064,
291 "Reading flash region -- 0x%x/0x%x.\n", 293 "Reading flash region -- 0x%x/0x%x.\n",
292 ha->optrom_region_start, ha->optrom_region_size)); 294 ha->optrom_region_start, ha->optrom_region_size);
293 295
294 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 296 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
295 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 297 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
@@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
328 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha)) 330 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
329 valid = 1; 331 valid = 1;
330 if (!valid) { 332 if (!valid) {
331 qla_printk(KERN_WARNING, ha, 333 ql_log(ql_log_warn, vha, 0x7065,
332 "Invalid start region 0x%x/0x%x.\n", start, size); 334 "Invalid start region 0x%x/0x%x.\n", start, size);
333 return -EINVAL; 335 return -EINVAL;
334 } 336 }
@@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
340 ha->optrom_state = QLA_SWRITING; 342 ha->optrom_state = QLA_SWRITING;
341 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 343 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
342 if (ha->optrom_buffer == NULL) { 344 if (ha->optrom_buffer == NULL) {
343 qla_printk(KERN_WARNING, ha, 345 ql_log(ql_log_warn, vha, 0x7066,
344 "Unable to allocate memory for optrom update " 346 "Unable to allocate memory for optrom update "
345 "(%x).\n", ha->optrom_region_size); 347 "(%x)\n", ha->optrom_region_size);
346 348
347 ha->optrom_state = QLA_SWAITING; 349 ha->optrom_state = QLA_SWAITING;
348 return count; 350 return count;
349 } 351 }
350 352
351 DEBUG2(qla_printk(KERN_INFO, ha, 353 ql_dbg(ql_dbg_user, vha, 0x7067,
352 "Staging flash region write -- 0x%x/0x%x.\n", 354 "Staging flash region write -- 0x%x/0x%x.\n",
353 ha->optrom_region_start, ha->optrom_region_size)); 355 ha->optrom_region_start, ha->optrom_region_size);
354 356
355 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 357 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
356 break; 358 break;
@@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
359 break; 361 break;
360 362
361 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 363 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
362 qla_printk(KERN_WARNING, ha, 364 ql_log(ql_log_warn, vha, 0x7068,
363 "HBA not online, failing flash update.\n"); 365 "HBA not online, failing flash update.\n");
364 return -EAGAIN; 366 return -EAGAIN;
365 } 367 }
366 368
367 DEBUG2(qla_printk(KERN_INFO, ha, 369 ql_dbg(ql_dbg_user, vha, 0x7069,
368 "Writing flash region -- 0x%x/0x%x.\n", 370 "Writing flash region -- 0x%x/0x%x.\n",
369 ha->optrom_region_start, ha->optrom_region_size)); 371 ha->optrom_region_start, ha->optrom_region_size);
370 372
371 ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 373 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
372 ha->optrom_region_start, ha->optrom_region_size); 374 ha->optrom_region_start, ha->optrom_region_size);
@@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
425 return 0; 427 return 0;
426 428
427 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 429 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
428 qla_printk(KERN_WARNING, ha, 430 ql_log(ql_log_warn, vha, 0x706a,
429 "HBA not online, failing VPD update.\n"); 431 "HBA not online, failing VPD update.\n");
430 return -EAGAIN; 432 return -EAGAIN;
431 } 433 }
@@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
440 442
441 tmp_data = vmalloc(256); 443 tmp_data = vmalloc(256);
442 if (!tmp_data) { 444 if (!tmp_data) {
443 qla_printk(KERN_WARNING, ha, 445 ql_log(ql_log_warn, vha, 0x706b,
444 "Unable to allocate memory for VPD information update.\n"); 446 "Unable to allocate memory for VPD information update.\n");
445 goto done; 447 goto done;
446 } 448 }
@@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
480 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 482 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
481 &ha->sfp_data_dma); 483 &ha->sfp_data_dma);
482 if (!ha->sfp_data) { 484 if (!ha->sfp_data) {
483 qla_printk(KERN_WARNING, ha, 485 ql_log(ql_log_warn, vha, 0x706c,
484 "Unable to allocate memory for SFP read-data.\n"); 486 "Unable to allocate memory for SFP read-data.\n");
485 return 0; 487 return 0;
486 } 488 }
@@ -499,9 +501,10 @@ do_read:
499 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, 501 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
500 addr, offset, SFP_BLOCK_SIZE, 0); 502 addr, offset, SFP_BLOCK_SIZE, 0);
501 if (rval != QLA_SUCCESS) { 503 if (rval != QLA_SUCCESS) {
502 qla_printk(KERN_WARNING, ha, 504 ql_log(ql_log_warn, vha, 0x706d,
503 "Unable to read SFP data (%x/%x/%x).\n", rval, 505 "Unable to read SFP data (%x/%x/%x).\n", rval,
504 addr, offset); 506 addr, offset);
507
505 count = 0; 508 count = 0;
506 break; 509 break;
507 } 510 }
@@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
538 type = simple_strtol(buf, NULL, 10); 541 type = simple_strtol(buf, NULL, 10);
539 switch (type) { 542 switch (type) {
540 case 0x2025c: 543 case 0x2025c:
541 qla_printk(KERN_INFO, ha, 544 ql_log(ql_log_info, vha, 0x706e,
542 "Issuing ISP reset on (%ld).\n", vha->host_no); 545 "Issuing ISP reset.\n");
543 546
544 scsi_block_requests(vha->host); 547 scsi_block_requests(vha->host);
545 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 548 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
551 if (!IS_QLA81XX(ha)) 554 if (!IS_QLA81XX(ha))
552 break; 555 break;
553 556
554 qla_printk(KERN_INFO, ha, 557 ql_log(ql_log_info, vha, 0x706f,
555 "Issuing MPI reset on (%ld).\n", vha->host_no); 558 "Issuing MPI reset.\n");
556 559
557 /* Make sure FC side is not in reset */ 560 /* Make sure FC side is not in reset */
558 qla2x00_wait_for_hba_online(vha); 561 qla2x00_wait_for_hba_online(vha);
@@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
560 /* Issue MPI reset */ 563 /* Issue MPI reset */
561 scsi_block_requests(vha->host); 564 scsi_block_requests(vha->host);
562 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) 565 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
563 qla_printk(KERN_WARNING, ha, 566 ql_log(ql_log_warn, vha, 0x7070,
564 "MPI reset failed on (%ld).\n", vha->host_no); 567 "MPI reset failed.\n");
565 scsi_unblock_requests(vha->host); 568 scsi_unblock_requests(vha->host);
566 break; 569 break;
567 case 0x2025e: 570 case 0x2025e:
568 if (!IS_QLA82XX(ha) || vha != base_vha) { 571 if (!IS_QLA82XX(ha) || vha != base_vha) {
569 qla_printk(KERN_INFO, ha, 572 ql_log(ql_log_info, vha, 0x7071,
570 "FCoE ctx reset not supported for host%ld.\n", 573 "FCoE ctx reset no supported.\n");
571 vha->host_no);
572 return count; 574 return count;
573 } 575 }
574 576
575 qla_printk(KERN_INFO, ha, 577 ql_log(ql_log_info, vha, 0x7072,
576 "Issuing FCoE CTX reset on host%ld.\n", vha->host_no); 578 "Issuing FCoE ctx reset.\n");
577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 579 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
578 qla2xxx_wake_dpc(vha); 580 qla2xxx_wake_dpc(vha);
579 qla2x00_wait_for_fcoe_ctx_reset(vha); 581 qla2x00_wait_for_fcoe_ctx_reset(vha);
@@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
611 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 613 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
612 &ha->edc_data_dma); 614 &ha->edc_data_dma);
613 if (!ha->edc_data) { 615 if (!ha->edc_data) {
614 DEBUG2(qla_printk(KERN_INFO, ha, 616 ql_log(ql_log_warn, vha, 0x7073,
615 "Unable to allocate memory for EDC write.\n")); 617 "Unable to allocate memory for EDC write.\n");
616 return 0; 618 return 0;
617 } 619 }
618 } 620 }
@@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
631 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data, 633 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
632 dev, adr, len, opt); 634 dev, adr, len, opt);
633 if (rval != QLA_SUCCESS) { 635 if (rval != QLA_SUCCESS) {
634 DEBUG2(qla_printk(KERN_INFO, ha, 636 ql_log(ql_log_warn, vha, 0x7074,
635 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n", 637 "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
636 rval, dev, adr, opt, len, buf[8])); 638 rval, dev, adr, opt, len, buf[8]);
637 return 0; 639 return 0;
638 } 640 }
639 641
@@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
669 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 671 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
670 &ha->edc_data_dma); 672 &ha->edc_data_dma);
671 if (!ha->edc_data) { 673 if (!ha->edc_data) {
672 DEBUG2(qla_printk(KERN_INFO, ha, 674 ql_log(ql_log_warn, vha, 0x708c,
673 "Unable to allocate memory for EDC status.\n")); 675 "Unable to allocate memory for EDC status.\n");
674 return 0; 676 return 0;
675 } 677 }
676 } 678 }
@@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
688 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data, 690 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
689 dev, adr, len, opt); 691 dev, adr, len, opt);
690 if (rval != QLA_SUCCESS) { 692 if (rval != QLA_SUCCESS) {
691 DEBUG2(qla_printk(KERN_INFO, ha, 693 ql_log(ql_log_info, vha, 0x7075,
692 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n", 694 "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
693 rval, dev, adr, opt, len)); 695 rval, dev, adr, opt, len);
694 return 0; 696 return 0;
695 } 697 }
696 698
@@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
749 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 751 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
750 &ha->xgmac_data_dma, GFP_KERNEL); 752 &ha->xgmac_data_dma, GFP_KERNEL);
751 if (!ha->xgmac_data) { 753 if (!ha->xgmac_data) {
752 qla_printk(KERN_WARNING, ha, 754 ql_log(ql_log_warn, vha, 0x7076,
753 "Unable to allocate memory for XGMAC read-data.\n"); 755 "Unable to allocate memory for XGMAC read-data.\n");
754 return 0; 756 return 0;
755 } 757 }
@@ -761,7 +763,7 @@ do_read:
761 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, 763 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
762 XGMAC_DATA_SIZE, &actual_size); 764 XGMAC_DATA_SIZE, &actual_size);
763 if (rval != QLA_SUCCESS) { 765 if (rval != QLA_SUCCESS) {
764 qla_printk(KERN_WARNING, ha, 766 ql_log(ql_log_warn, vha, 0x7077,
765 "Unable to read XGMAC data (%x).\n", rval); 767 "Unable to read XGMAC data (%x).\n", rval);
766 count = 0; 768 count = 0;
767 } 769 }
@@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
801 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 803 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
802 &ha->dcbx_tlv_dma, GFP_KERNEL); 804 &ha->dcbx_tlv_dma, GFP_KERNEL);
803 if (!ha->dcbx_tlv) { 805 if (!ha->dcbx_tlv) {
804 qla_printk(KERN_WARNING, ha, 806 ql_log(ql_log_warn, vha, 0x7078,
805 "Unable to allocate memory for DCBX TLV read-data.\n"); 807 "Unable to allocate memory for DCBX TLV read-data.\n");
806 return 0; 808 return 0;
807 } 809 }
@@ -813,8 +815,8 @@ do_read:
813 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, 815 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
814 DCBX_TLV_DATA_SIZE); 816 DCBX_TLV_DATA_SIZE);
815 if (rval != QLA_SUCCESS) { 817 if (rval != QLA_SUCCESS) {
816 qla_printk(KERN_WARNING, ha, 818 ql_log(ql_log_warn, vha, 0x7079,
817 "Unable to read DCBX TLV data (%x).\n", rval); 819 "Unable to read DCBX TLV (%x).\n", rval);
818 count = 0; 820 count = 0;
819 } 821 }
820 822
@@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
869 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 871 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
870 iter->attr); 872 iter->attr);
871 if (ret) 873 if (ret)
872 qla_printk(KERN_INFO, vha->hw, 874 ql_log(ql_log_warn, vha, 0x00f3,
873 "Unable to create sysfs %s binary attribute " 875 "Unable to create sysfs %s binary attribute (%d).\n",
874 "(%d).\n", iter->name, ret); 876 iter->name, ret);
877 else
878 ql_dbg(ql_dbg_init, vha, 0x00f4,
879 "Successfully created sysfs %s binary attribure.\n",
880 iter->name);
875 } 881 }
876} 882}
877 883
@@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1126 return -EPERM; 1132 return -EPERM;
1127 1133
1128 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 1134 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1129 qla_printk(KERN_WARNING, ha, 1135 ql_log(ql_log_warn, vha, 0x707a,
1130 "Abort ISP active -- ignoring beacon request.\n"); 1136 "Abort ISP active -- ignoring beacon request.\n");
1131 return -EBUSY; 1137 return -EBUSY;
1132 } 1138 }
@@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev,
1322 temp = frac = 0; 1328 temp = frac = 0;
1323 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1329 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1324 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 1330 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1325 DEBUG2_3_11(printk(KERN_WARNING 1331 ql_log(ql_log_warn, vha, 0x707b,
1326 "%s(%ld): isp reset in progress.\n", 1332 "ISP reset active.\n");
1327 __func__, vha->host_no));
1328 else if (!vha->hw->flags.eeh_busy) 1333 else if (!vha->hw->flags.eeh_busy)
1329 rval = qla2x00_get_thermal_temp(vha, &temp, &frac); 1334 rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1330 if (rval != QLA_SUCCESS) 1335 if (rval != QLA_SUCCESS)
@@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1343 1348
1344 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1349 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1345 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 1350 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1346 DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n", 1351 ql_log(ql_log_warn, vha, 0x707c,
1347 __func__, vha->host_no)); 1352 "ISP reset active.\n");
1348 else if (!vha->hw->flags.eeh_busy) 1353 else if (!vha->hw->flags.eeh_busy)
1349 rval = qla2x00_get_firmware_state(vha, state); 1354 rval = qla2x00_get_firmware_state(vha, state);
1350 if (rval != QLA_SUCCESS) 1355 if (rval != QLA_SUCCESS)
@@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1645 1650
1646 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1651 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1647 if (stats == NULL) { 1652 if (stats == NULL) {
1648 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1653 ql_log(ql_log_warn, vha, 0x707d,
1649 __func__, base_vha->host_no)); 1654 "Failed to allocate memory for stats.\n");
1650 goto done; 1655 goto done;
1651 } 1656 }
1652 memset(stats, 0, DMA_POOL_SIZE); 1657 memset(stats, 0, DMA_POOL_SIZE);
@@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1746 1751
1747 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1752 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1748 if (ret) { 1753 if (ret) {
1749 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, " 1754 ql_log(ql_log_warn, vha, 0x707e,
1750 "status %x\n", ret)); 1755 "Vport sanity check failed, status %x\n", ret);
1751 return (ret); 1756 return (ret);
1752 } 1757 }
1753 1758
1754 vha = qla24xx_create_vhost(fc_vport); 1759 vha = qla24xx_create_vhost(fc_vport);
1755 if (vha == NULL) { 1760 if (vha == NULL) {
1756 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n", 1761 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
1757 vha));
1758 return FC_VPORT_FAILED; 1762 return FC_VPORT_FAILED;
1759 } 1763 }
1760 if (disable) { 1764 if (disable) {
@@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1764 atomic_set(&vha->vp_state, VP_FAILED); 1768 atomic_set(&vha->vp_state, VP_FAILED);
1765 1769
1766 /* ready to create vport */ 1770 /* ready to create vport */
1767 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n", 1771 ql_log(ql_log_info, vha, 0x7080,
1768 vha->vp_idx); 1772 "VP entry id %d assigned.\n", vha->vp_idx);
1769 1773
1770 /* initialized vport states */ 1774 /* initialized vport states */
1771 atomic_set(&vha->loop_state, LOOP_DOWN); 1775 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1775,8 +1779,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1775 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || 1779 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1776 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 1780 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1777 /* Don't retry or attempt login of this virtual port */ 1781 /* Don't retry or attempt login of this virtual port */
1778 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n", 1782 ql_dbg(ql_dbg_user, vha, 0x7081,
1779 base_vha->host_no)); 1783 "Vport loop state is not UP.\n");
1780 atomic_set(&vha->loop_state, LOOP_DEAD); 1784 atomic_set(&vha->loop_state, LOOP_DEAD);
1781 if (!disable) 1785 if (!disable)
1782 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1785,9 +1789,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1785 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 1789 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
1786 if (ha->fw_attributes & BIT_4) { 1790 if (ha->fw_attributes & BIT_4) {
1787 vha->flags.difdix_supported = 1; 1791 vha->flags.difdix_supported = 1;
1788 DEBUG18(qla_printk(KERN_INFO, ha, 1792 ql_dbg(ql_dbg_user, vha, 0x7082,
1789 "Registering for DIF/DIX type 1 and 3" 1793 "Registered for DIF/DIX type 1 and 3 protection.\n");
1790 " protection.\n"));
1791 scsi_host_set_prot(vha->host, 1794 scsi_host_set_prot(vha->host,
1792 SHOST_DIF_TYPE1_PROTECTION 1795 SHOST_DIF_TYPE1_PROTECTION
1793 | SHOST_DIF_TYPE2_PROTECTION 1796 | SHOST_DIF_TYPE2_PROTECTION
@@ -1802,8 +1805,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1802 1805
1803 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, 1806 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1804 &ha->pdev->dev)) { 1807 &ha->pdev->dev)) {
1805 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", 1808 ql_dbg(ql_dbg_user, vha, 0x7083,
1806 vha->host_no, vha->vp_idx)); 1809 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
1807 goto vport_create_failed_2; 1810 goto vport_create_failed_2;
1808 } 1811 }
1809 1812
@@ -1820,6 +1823,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1820 1823
1821 if (ha->flags.cpu_affinity_enabled) { 1824 if (ha->flags.cpu_affinity_enabled) {
1822 req = ha->req_q_map[1]; 1825 req = ha->req_q_map[1];
1826 ql_dbg(ql_dbg_multiq, vha, 0xc000,
1827 "Request queue %p attached with "
1828 "VP[%d], cpu affinity =%d\n",
1829 req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
1823 goto vport_queue; 1830 goto vport_queue;
1824 } else if (ql2xmaxqueues == 1 || !ha->npiv_info) 1831 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1825 goto vport_queue; 1832 goto vport_queue;
@@ -1836,13 +1843,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1836 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, 1843 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1837 qos); 1844 qos);
1838 if (!ret) 1845 if (!ret)
1839 qla_printk(KERN_WARNING, ha, 1846 ql_log(ql_log_warn, vha, 0x7084,
1840 "Can't create request queue for vp_idx:%d\n", 1847 "Can't create request queue for VP[%d]\n",
1841 vha->vp_idx); 1848 vha->vp_idx);
1842 else { 1849 else {
1843 DEBUG2(qla_printk(KERN_INFO, ha, 1850 ql_dbg(ql_dbg_multiq, vha, 0xc001,
1844 "Request Que:%d (QoS: %d) created for vp_idx:%d\n", 1851 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1845 ret, qos, vha->vp_idx)); 1852 ret, qos, vha->vp_idx);
1853 ql_dbg(ql_dbg_user, vha, 0x7085,
1854 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1855 ret, qos, vha->vp_idx);
1846 req = ha->req_q_map[ret]; 1856 req = ha->req_q_map[ret];
1847 } 1857 }
1848 } 1858 }
@@ -1882,12 +1892,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1882 1892
1883 if (vha->timer_active) { 1893 if (vha->timer_active) {
1884 qla2x00_vp_stop_timer(vha); 1894 qla2x00_vp_stop_timer(vha);
1885 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" 1895 ql_dbg(ql_dbg_user, vha, 0x7086,
1886 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); 1896 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1887 } 1897 }
1888 1898
1889 /* No pending activities shall be there on the vha now */ 1899 /* No pending activities shall be there on the vha now */
1890 DEBUG(msleep(random32()%10)); /* Just to see if something falls on 1900 if (ql2xextended_error_logging & ql_dbg_user)
1901 msleep(random32()%10); /* Just to see if something falls on
1891 * the net we have placed below */ 1902 * the net we have placed below */
1892 1903
1893 BUG_ON(atomic_read(&vha->vref_count)); 1904 BUG_ON(atomic_read(&vha->vref_count));
@@ -1901,12 +1912,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1901 1912
1902 if (vha->req->id && !ha->flags.cpu_affinity_enabled) { 1913 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1903 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) 1914 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1904 qla_printk(KERN_WARNING, ha, 1915 ql_log(ql_log_warn, vha, 0x7087,
1905 "Queue delete failed.\n"); 1916 "Queue delete failed.\n");
1906 } 1917 }
1907 1918
1908 scsi_host_put(vha->host); 1919 scsi_host_put(vha->host);
1909 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); 1920 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
1910 return 0; 1921 return 0;
1911} 1922}
1912 1923
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8c10e2c4928e..07d1767cd26b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -36,7 +36,8 @@ done:
36} 36}
37 37
38int 38int
39qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) 39qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
40 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
40{ 41{
41 int i, ret, num_valid; 42 int i, ret, num_valid;
42 uint8_t *bcode; 43 uint8_t *bcode;
@@ -51,18 +52,17 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
51 52
52 if (bcode_val == 0xFFFFFFFF) { 53 if (bcode_val == 0xFFFFFFFF) {
53 /* No FCP Priority config data in flash */ 54 /* No FCP Priority config data in flash */
54 DEBUG2(printk(KERN_INFO 55 ql_dbg(ql_dbg_user, vha, 0x7051,
55 "%s: No FCP priority config data.\n", 56 "No FCP Priority config data.\n");
56 __func__));
57 return 0; 57 return 0;
58 } 58 }
59 59
60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' || 60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
61 bcode[3] != 'S') { 61 bcode[3] != 'S') {
62 /* Invalid FCP priority data header*/ 62 /* Invalid FCP priority data header*/
63 DEBUG2(printk(KERN_ERR 63 ql_dbg(ql_dbg_user, vha, 0x7052,
64 "%s: Invalid FCP Priority data header. bcode=0x%x\n", 64 "Invalid FCP Priority data header. bcode=0x%x.\n",
65 __func__, bcode_val)); 65 bcode_val);
66 return 0; 66 return 0;
67 } 67 }
68 if (flag != 1) 68 if (flag != 1)
@@ -77,15 +77,14 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
77 77
78 if (num_valid == 0) { 78 if (num_valid == 0) {
79 /* No valid FCP priority data entries */ 79 /* No valid FCP priority data entries */
80 DEBUG2(printk(KERN_ERR 80 ql_dbg(ql_dbg_user, vha, 0x7053,
81 "%s: No valid FCP Priority data entries.\n", 81 "No valid FCP Priority data entries.\n");
82 __func__));
83 ret = 0; 82 ret = 0;
84 } else { 83 } else {
85 /* FCP priority data is valid */ 84 /* FCP priority data is valid */
86 DEBUG2(printk(KERN_INFO 85 ql_dbg(ql_dbg_user, vha, 0x7054,
87 "%s: Valid FCP priority data. num entries = %d\n", 86 "Valid FCP priority data. num entries = %d.\n",
88 __func__, num_valid)); 87 num_valid);
89 } 88 }
90 89
91 return ret; 90 return ret;
@@ -182,10 +181,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
182 if (!ha->fcp_prio_cfg) { 181 if (!ha->fcp_prio_cfg) {
183 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 182 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
184 if (!ha->fcp_prio_cfg) { 183 if (!ha->fcp_prio_cfg) {
185 qla_printk(KERN_WARNING, ha, 184 ql_log(ql_log_warn, vha, 0x7050,
186 "Unable to allocate memory " 185 "Unable to allocate memory for fcp prio "
187 "for fcp prio config data (%x).\n", 186 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
188 FCP_PRIO_CFG_SIZE);
189 bsg_job->reply->result = (DID_ERROR << 16); 187 bsg_job->reply->result = (DID_ERROR << 16);
190 ret = -ENOMEM; 188 ret = -ENOMEM;
191 goto exit_fcp_prio_cfg; 189 goto exit_fcp_prio_cfg;
@@ -198,9 +196,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
198 FCP_PRIO_CFG_SIZE); 196 FCP_PRIO_CFG_SIZE);
199 197
200 /* validate fcp priority data */ 198 /* validate fcp priority data */
201 if (!qla24xx_fcp_prio_cfg_valid( 199
202 (struct qla_fcp_prio_cfg *) 200 if (!qla24xx_fcp_prio_cfg_valid(vha,
203 ha->fcp_prio_cfg, 1)) { 201 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
204 bsg_job->reply->result = (DID_ERROR << 16); 202 bsg_job->reply->result = (DID_ERROR << 16);
205 ret = -EINVAL; 203 ret = -EINVAL;
206 /* If buffer was invalidatic int 204 /* If buffer was invalidatic int
@@ -256,9 +254,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
256 254
257 /* pass through is supported only for ISP 4Gb or higher */ 255 /* pass through is supported only for ISP 4Gb or higher */
258 if (!IS_FWI2_CAPABLE(ha)) { 256 if (!IS_FWI2_CAPABLE(ha)) {
259 DEBUG2(qla_printk(KERN_INFO, ha, 257 ql_dbg(ql_dbg_user, vha, 0x7001,
260 "scsi(%ld):ELS passthru not supported for ISP23xx based " 258 "ELS passthru not supported for ISP23xx based adapters.\n");
261 "adapters\n", vha->host_no));
262 rval = -EPERM; 259 rval = -EPERM;
263 goto done; 260 goto done;
264 } 261 }
@@ -266,11 +263,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
266 /* Multiple SG's are not supported for ELS requests */ 263 /* Multiple SG's are not supported for ELS requests */
267 if (bsg_job->request_payload.sg_cnt > 1 || 264 if (bsg_job->request_payload.sg_cnt > 1 ||
268 bsg_job->reply_payload.sg_cnt > 1) { 265 bsg_job->reply_payload.sg_cnt > 1) {
269 DEBUG2(printk(KERN_INFO 266 ql_dbg(ql_dbg_user, vha, 0x7002,
270 "multiple SG's are not supported for ELS requests" 267 "Multiple SG's are not suppored for ELS requests, "
271 " [request_sg_cnt: %x reply_sg_cnt: %x]\n", 268 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
272 bsg_job->request_payload.sg_cnt, 269 bsg_job->request_payload.sg_cnt,
273 bsg_job->reply_payload.sg_cnt)); 270 bsg_job->reply_payload.sg_cnt);
274 rval = -EPERM; 271 rval = -EPERM;
275 goto done; 272 goto done;
276 } 273 }
@@ -281,9 +278,9 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
281 * if not perform fabric login 278 * if not perform fabric login
282 */ 279 */
283 if (qla2x00_fabric_login(vha, fcport, &nextlid)) { 280 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
284 DEBUG2(qla_printk(KERN_WARNING, ha, 281 ql_dbg(ql_dbg_user, vha, 0x7003,
285 "failed to login port %06X for ELS passthru\n", 282 "Failed to login port %06X for ELS passthru.\n",
286 fcport->d_id.b24)); 283 fcport->d_id.b24);
287 rval = -EIO; 284 rval = -EIO;
288 goto done; 285 goto done;
289 } 286 }
@@ -314,8 +311,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
314 } 311 }
315 312
316 if (!vha->flags.online) { 313 if (!vha->flags.online) {
317 DEBUG2(qla_printk(KERN_WARNING, ha, 314 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
318 "host not online\n"));
319 rval = -EIO; 315 rval = -EIO;
320 goto done; 316 goto done;
321 } 317 }
@@ -337,12 +333,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
337 333
338 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 334 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
339 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 335 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
340 DEBUG2(printk(KERN_INFO 336 ql_log(ql_log_warn, vha, 0x7008,
341 "dma mapping resulted in different sg counts \ 337 "dma mapping resulted in different sg counts, "
342 [request_sg_cnt: %x dma_request_sg_cnt: %x\ 338 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
343 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 339 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
344 bsg_job->request_payload.sg_cnt, req_sg_cnt, 340 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
345 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
346 rval = -EAGAIN; 341 rval = -EAGAIN;
347 goto done_unmap_sg; 342 goto done_unmap_sg;
348 } 343 }
@@ -363,15 +358,16 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
363 "bsg_els_rpt" : "bsg_els_hst"); 358 "bsg_els_rpt" : "bsg_els_hst");
364 els->u.bsg_job = bsg_job; 359 els->u.bsg_job = bsg_job;
365 360
366 DEBUG2(qla_printk(KERN_INFO, ha, 361 ql_dbg(ql_dbg_user, vha, 0x700a,
367 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " 362 "bsg rqst type: %s els type: %x - loop-id=%x "
368 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, 363 "portid=%-2x%02x%02x.\n", type,
369 bsg_job->request->rqst_data.h_els.command_code, 364 bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
370 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 365 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
371 fcport->d_id.b.al_pa));
372 366
373 rval = qla2x00_start_sp(sp); 367 rval = qla2x00_start_sp(sp);
374 if (rval != QLA_SUCCESS) { 368 if (rval != QLA_SUCCESS) {
369 ql_log(ql_log_warn, vha, 0x700e,
370 "qla2x00_start_sp failed = %d\n", rval);
375 kfree(sp->ctx); 371 kfree(sp->ctx);
376 mempool_free(sp, ha->srb_mempool); 372 mempool_free(sp, ha->srb_mempool);
377 rval = -EIO; 373 rval = -EIO;
@@ -411,6 +407,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
411 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 407 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
412 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 408 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
413 if (!req_sg_cnt) { 409 if (!req_sg_cnt) {
410 ql_log(ql_log_warn, vha, 0x700f,
411 "dma_map_sg return %d for request\n", req_sg_cnt);
414 rval = -ENOMEM; 412 rval = -ENOMEM;
415 goto done; 413 goto done;
416 } 414 }
@@ -418,24 +416,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
418 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 416 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
419 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 417 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
420 if (!rsp_sg_cnt) { 418 if (!rsp_sg_cnt) {
419 ql_log(ql_log_warn, vha, 0x7010,
420 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
421 rval = -ENOMEM; 421 rval = -ENOMEM;
422 goto done; 422 goto done;
423 } 423 }
424 424
425 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 425 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
426 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 426 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
427 DEBUG2(qla_printk(KERN_WARNING, ha, 427 ql_log(ql_log_warn, vha, 0x7011,
428 "[request_sg_cnt: %x dma_request_sg_cnt: %x\ 428 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
429 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 429 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
430 bsg_job->request_payload.sg_cnt, req_sg_cnt, 430 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
431 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
432 rval = -EAGAIN; 431 rval = -EAGAIN;
433 goto done_unmap_sg; 432 goto done_unmap_sg;
434 } 433 }
435 434
436 if (!vha->flags.online) { 435 if (!vha->flags.online) {
437 DEBUG2(qla_printk(KERN_WARNING, ha, 436 ql_log(ql_log_warn, vha, 0x7012,
438 "host not online\n")); 437 "Host is not online.\n");
439 rval = -EIO; 438 rval = -EIO;
440 goto done_unmap_sg; 439 goto done_unmap_sg;
441 } 440 }
@@ -451,8 +450,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
451 loop_id = vha->mgmt_svr_loop_id; 450 loop_id = vha->mgmt_svr_loop_id;
452 break; 451 break;
453 default: 452 default:
454 DEBUG2(qla_printk(KERN_INFO, ha, 453 ql_dbg(ql_dbg_user, vha, 0x7013,
455 "Unknown loop id: %x\n", loop_id)); 454 "Unknown loop id: %x.\n", loop_id);
456 rval = -EINVAL; 455 rval = -EINVAL;
457 goto done_unmap_sg; 456 goto done_unmap_sg;
458 } 457 }
@@ -464,6 +463,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
464 */ 463 */
465 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 464 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
466 if (!fcport) { 465 if (!fcport) {
466 ql_log(ql_log_warn, vha, 0x7014,
467 "Failed to allocate fcport.\n");
467 rval = -ENOMEM; 468 rval = -ENOMEM;
468 goto done_unmap_sg; 469 goto done_unmap_sg;
469 } 470 }
@@ -479,6 +480,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
479 /* Alloc SRB structure */ 480 /* Alloc SRB structure */
480 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); 481 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
481 if (!sp) { 482 if (!sp) {
483 ql_log(ql_log_warn, vha, 0x7015,
484 "qla2x00_get_ctx_bsg_sp failed.\n");
482 rval = -ENOMEM; 485 rval = -ENOMEM;
483 goto done_free_fcport; 486 goto done_free_fcport;
484 } 487 }
@@ -488,15 +491,17 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
488 ct->name = "bsg_ct"; 491 ct->name = "bsg_ct";
489 ct->u.bsg_job = bsg_job; 492 ct->u.bsg_job = bsg_job;
490 493
491 DEBUG2(qla_printk(KERN_INFO, ha, 494 ql_dbg(ql_dbg_user, vha, 0x7016,
492 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " 495 "bsg rqst type: %s else type: %x - "
493 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, 496 "loop-id=%x portid=%02x%02x%02x.\n", type,
494 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), 497 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
495 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 498 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
496 fcport->d_id.b.al_pa)); 499 fcport->d_id.b.al_pa);
497 500
498 rval = qla2x00_start_sp(sp); 501 rval = qla2x00_start_sp(sp);
499 if (rval != QLA_SUCCESS) { 502 if (rval != QLA_SUCCESS) {
503 ql_log(ql_log_warn, vha, 0x7017,
504 "qla2x00_start_sp failed=%d.\n", rval);
500 kfree(sp->ctx); 505 kfree(sp->ctx);
501 mempool_free(sp, ha->srb_mempool); 506 mempool_free(sp, ha->srb_mempool);
502 rval = -EIO; 507 rval = -EIO;
@@ -535,9 +540,8 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
535 ha->notify_dcbx_comp = 1; 540 ha->notify_dcbx_comp = 1;
536 ret = qla81xx_set_port_config(vha, new_config); 541 ret = qla81xx_set_port_config(vha, new_config);
537 if (ret != QLA_SUCCESS) { 542 if (ret != QLA_SUCCESS) {
538 DEBUG2(printk(KERN_ERR 543 ql_log(ql_log_warn, vha, 0x7021,
539 "%s(%lu): Set port config failed\n", 544 "set port config failed.\n");
540 __func__, vha->host_no));
541 ha->notify_dcbx_comp = 0; 545 ha->notify_dcbx_comp = 0;
542 rval = -EINVAL; 546 rval = -EINVAL;
543 goto done_set_internal; 547 goto done_set_internal;
@@ -545,11 +549,11 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
545 549
546 /* Wait for DCBX complete event */ 550 /* Wait for DCBX complete event */
547 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { 551 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
548 DEBUG2(qla_printk(KERN_WARNING, ha, 552 ql_dbg(ql_dbg_user, vha, 0x7022,
549 "State change notificaition not received.\n")); 553 "State change notification not received.\n");
550 } else 554 } else
551 DEBUG2(qla_printk(KERN_INFO, ha, 555 ql_dbg(ql_dbg_user, vha, 0x7023,
552 "State change RECEIVED\n")); 556 "State change received.\n");
553 557
554 ha->notify_dcbx_comp = 0; 558 ha->notify_dcbx_comp = 0;
555 559
@@ -581,9 +585,8 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
581 ha->notify_dcbx_comp = wait; 585 ha->notify_dcbx_comp = wait;
582 ret = qla81xx_set_port_config(vha, new_config); 586 ret = qla81xx_set_port_config(vha, new_config);
583 if (ret != QLA_SUCCESS) { 587 if (ret != QLA_SUCCESS) {
584 DEBUG2(printk(KERN_ERR 588 ql_log(ql_log_warn, vha, 0x7025,
585 "%s(%lu): Set port config failed\n", 589 "Set port config failed.\n");
586 __func__, vha->host_no));
587 ha->notify_dcbx_comp = 0; 590 ha->notify_dcbx_comp = 0;
588 rval = -EINVAL; 591 rval = -EINVAL;
589 goto done_reset_internal; 592 goto done_reset_internal;
@@ -592,14 +595,14 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
592 /* Wait for DCBX complete event */ 595 /* Wait for DCBX complete event */
593 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, 596 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
594 (20 * HZ))) { 597 (20 * HZ))) {
595 DEBUG2(qla_printk(KERN_WARNING, ha, 598 ql_dbg(ql_dbg_user, vha, 0x7026,
596 "State change notificaition not received.\n")); 599 "State change notification not received.\n");
597 ha->notify_dcbx_comp = 0; 600 ha->notify_dcbx_comp = 0;
598 rval = -EINVAL; 601 rval = -EINVAL;
599 goto done_reset_internal; 602 goto done_reset_internal;
600 } else 603 } else
601 DEBUG2(qla_printk(KERN_INFO, ha, 604 ql_dbg(ql_dbg_user, vha, 0x7027,
602 "State change RECEIVED\n")); 605 "State change received.\n");
603 606
604 ha->notify_dcbx_comp = 0; 607 ha->notify_dcbx_comp = 0;
605 } 608 }
@@ -629,11 +632,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
629 632
630 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 633 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
631 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 634 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
632 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 635 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
636 ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
633 return -EBUSY; 637 return -EBUSY;
638 }
634 639
635 if (!vha->flags.online) { 640 if (!vha->flags.online) {
636 DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n")); 641 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
637 return -EIO; 642 return -EIO;
638 } 643 }
639 644
@@ -641,26 +646,31 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
641 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 646 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
642 DMA_TO_DEVICE); 647 DMA_TO_DEVICE);
643 648
644 if (!elreq.req_sg_cnt) 649 if (!elreq.req_sg_cnt) {
650 ql_log(ql_log_warn, vha, 0x701a,
651 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
645 return -ENOMEM; 652 return -ENOMEM;
653 }
646 654
647 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 655 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
648 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 656 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
649 DMA_FROM_DEVICE); 657 DMA_FROM_DEVICE);
650 658
651 if (!elreq.rsp_sg_cnt) { 659 if (!elreq.rsp_sg_cnt) {
660 ql_log(ql_log_warn, vha, 0x701b,
661 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
652 rval = -ENOMEM; 662 rval = -ENOMEM;
653 goto done_unmap_req_sg; 663 goto done_unmap_req_sg;
654 } 664 }
655 665
656 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || 666 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
657 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 667 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
658 DEBUG2(printk(KERN_INFO 668 ql_log(ql_log_warn, vha, 0x701c,
659 "dma mapping resulted in different sg counts " 669 "dma mapping resulted in different sg counts, "
660 "[request_sg_cnt: %x dma_request_sg_cnt: %x " 670 "request_sg_cnt: %x dma_request_sg_cnt: %x "
661 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 671 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
662 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, 672 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
663 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt)); 673 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
664 rval = -EAGAIN; 674 rval = -EAGAIN;
665 goto done_unmap_sg; 675 goto done_unmap_sg;
666 } 676 }
@@ -668,8 +678,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
668 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, 678 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
669 &req_data_dma, GFP_KERNEL); 679 &req_data_dma, GFP_KERNEL);
670 if (!req_data) { 680 if (!req_data) {
671 DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data " 681 ql_log(ql_log_warn, vha, 0x701d,
672 "failed for host=%lu\n", __func__, vha->host_no)); 682 "dma alloc failed for req_data.\n");
673 rval = -ENOMEM; 683 rval = -ENOMEM;
674 goto done_unmap_sg; 684 goto done_unmap_sg;
675 } 685 }
@@ -677,8 +687,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
677 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, 687 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
678 &rsp_data_dma, GFP_KERNEL); 688 &rsp_data_dma, GFP_KERNEL);
679 if (!rsp_data) { 689 if (!rsp_data) {
680 DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data " 690 ql_log(ql_log_warn, vha, 0x7004,
681 "failed for host=%lu\n", __func__, vha->host_no)); 691 "dma alloc failed for rsp_data.\n");
682 rval = -ENOMEM; 692 rval = -ENOMEM;
683 goto done_free_dma_req; 693 goto done_free_dma_req;
684 } 694 }
@@ -699,8 +709,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
699 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 709 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
700 elreq.options == EXTERNAL_LOOPBACK) { 710 elreq.options == EXTERNAL_LOOPBACK) {
701 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 711 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
702 DEBUG2(qla_printk(KERN_INFO, ha, 712 ql_dbg(ql_dbg_user, vha, 0x701e,
703 "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type)); 713 "BSG request type: %s.\n", type);
704 command_sent = INT_DEF_LB_ECHO_CMD; 714 command_sent = INT_DEF_LB_ECHO_CMD;
705 rval = qla2x00_echo_test(vha, &elreq, response); 715 rval = qla2x00_echo_test(vha, &elreq, response);
706 } else { 716 } else {
@@ -708,9 +718,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
708 memset(config, 0, sizeof(config)); 718 memset(config, 0, sizeof(config));
709 memset(new_config, 0, sizeof(new_config)); 719 memset(new_config, 0, sizeof(new_config));
710 if (qla81xx_get_port_config(vha, config)) { 720 if (qla81xx_get_port_config(vha, config)) {
711 DEBUG2(printk(KERN_ERR 721 ql_log(ql_log_warn, vha, 0x701f,
712 "%s(%lu): Get port config failed\n", 722 "Get port config failed.\n");
713 __func__, vha->host_no));
714 bsg_job->reply->reply_payload_rcv_len = 0; 723 bsg_job->reply->reply_payload_rcv_len = 0;
715 bsg_job->reply->result = (DID_ERROR << 16); 724 bsg_job->reply->result = (DID_ERROR << 16);
716 rval = -EPERM; 725 rval = -EPERM;
@@ -718,11 +727,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
718 } 727 }
719 728
720 if (elreq.options != EXTERNAL_LOOPBACK) { 729 if (elreq.options != EXTERNAL_LOOPBACK) {
721 DEBUG2(qla_printk(KERN_INFO, ha, 730 ql_dbg(ql_dbg_user, vha, 0x7020,
722 "Internal: current port config = %x\n", 731 "Internal: curent port config = %x\n",
723 config[0])); 732 config[0]);
724 if (qla81xx_set_internal_loopback(vha, config, 733 if (qla81xx_set_internal_loopback(vha, config,
725 new_config)) { 734 new_config)) {
735 ql_log(ql_log_warn, vha, 0x7024,
736 "Internal loopback failed.\n");
726 bsg_job->reply->reply_payload_rcv_len = 737 bsg_job->reply->reply_payload_rcv_len =
727 0; 738 0;
728 bsg_job->reply->result = 739 bsg_job->reply->result =
@@ -746,9 +757,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
746 } 757 }
747 758
748 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 759 type = "FC_BSG_HST_VENDOR_LOOPBACK";
749 DEBUG2(qla_printk(KERN_INFO, ha, 760 ql_dbg(ql_dbg_user, vha, 0x7028,
750 "scsi(%ld) bsg rqst type: %s\n", 761 "BSG request type: %s.\n", type);
751 vha->host_no, type));
752 762
753 command_sent = INT_DEF_LB_LOOPBACK_CMD; 763 command_sent = INT_DEF_LB_LOOPBACK_CMD;
754 rval = qla2x00_loopback_test(vha, &elreq, response); 764 rval = qla2x00_loopback_test(vha, &elreq, response);
@@ -763,17 +773,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
763 773
764 if (response[0] == MBS_COMMAND_ERROR && 774 if (response[0] == MBS_COMMAND_ERROR &&
765 response[1] == MBS_LB_RESET) { 775 response[1] == MBS_LB_RESET) {
766 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing " 776 ql_log(ql_log_warn, vha, 0x7029,
767 "ISP\n", __func__, vha->host_no)); 777 "MBX command error, Aborting ISP.\n");
768 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 778 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
769 qla2xxx_wake_dpc(vha); 779 qla2xxx_wake_dpc(vha);
770 qla2x00_wait_for_chip_reset(vha); 780 qla2x00_wait_for_chip_reset(vha);
771 /* Also reset the MPI */ 781 /* Also reset the MPI */
772 if (qla81xx_restart_mpi_firmware(vha) != 782 if (qla81xx_restart_mpi_firmware(vha) !=
773 QLA_SUCCESS) { 783 QLA_SUCCESS) {
774 qla_printk(KERN_INFO, ha, 784 ql_log(ql_log_warn, vha, 0x702a,
775 "MPI reset failed for host%ld.\n", 785 "MPI reset failed.\n");
776 vha->host_no);
777 } 786 }
778 787
779 bsg_job->reply->reply_payload_rcv_len = 0; 788 bsg_job->reply->reply_payload_rcv_len = 0;
@@ -783,17 +792,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
783 } 792 }
784 } else { 793 } else {
785 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 794 type = "FC_BSG_HST_VENDOR_LOOPBACK";
786 DEBUG2(qla_printk(KERN_INFO, ha, 795 ql_dbg(ql_dbg_user, vha, 0x702b,
787 "scsi(%ld) bsg rqst type: %s\n", 796 "BSG request type: %s.\n", type);
788 vha->host_no, type));
789 command_sent = INT_DEF_LB_LOOPBACK_CMD; 797 command_sent = INT_DEF_LB_LOOPBACK_CMD;
790 rval = qla2x00_loopback_test(vha, &elreq, response); 798 rval = qla2x00_loopback_test(vha, &elreq, response);
791 } 799 }
792 } 800 }
793 801
794 if (rval) { 802 if (rval) {
795 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 803 ql_log(ql_log_warn, vha, 0x702c,
796 "request %s failed\n", vha->host_no, type)); 804 "Vendor request %s failed.\n", type);
797 805
798 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + 806 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
799 sizeof(struct fc_bsg_reply); 807 sizeof(struct fc_bsg_reply);
@@ -805,8 +813,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
805 bsg_job->reply->reply_payload_rcv_len = 0; 813 bsg_job->reply->reply_payload_rcv_len = 0;
806 bsg_job->reply->result = (DID_ERROR << 16); 814 bsg_job->reply->result = (DID_ERROR << 16);
807 } else { 815 } else {
808 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 816 ql_dbg(ql_dbg_user, vha, 0x702d,
809 "request %s completed\n", vha->host_no, type)); 817 "Vendor request %s completed.\n", type);
810 818
811 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 819 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
812 sizeof(response) + sizeof(uint8_t); 820 sizeof(response) + sizeof(uint8_t);
@@ -851,12 +859,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
851 859
852 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 860 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
853 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 861 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
854 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 862 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
863 ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
855 return -EBUSY; 864 return -EBUSY;
865 }
856 866
857 if (!IS_QLA84XX(ha)) { 867 if (!IS_QLA84XX(ha)) {
858 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 868 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
859 "exiting.\n", vha->host_no));
860 return -EINVAL; 869 return -EINVAL;
861 } 870 }
862 871
@@ -865,14 +874,14 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
865 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); 874 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
866 875
867 if (rval) { 876 if (rval) {
868 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 877 ql_log(ql_log_warn, vha, 0x7030,
869 "request 84xx reset failed\n", vha->host_no)); 878 "Vendor request 84xx reset failed.\n");
870 rval = bsg_job->reply->reply_payload_rcv_len = 0; 879 rval = bsg_job->reply->reply_payload_rcv_len = 0;
871 bsg_job->reply->result = (DID_ERROR << 16); 880 bsg_job->reply->result = (DID_ERROR << 16);
872 881
873 } else { 882 } else {
874 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 883 ql_dbg(ql_dbg_user, vha, 0x7031,
875 "request 84xx reset completed\n", vha->host_no)); 884 "Vendor request 84xx reset completed.\n");
876 bsg_job->reply->result = DID_OK; 885 bsg_job->reply->result = DID_OK;
877 } 886 }
878 887
@@ -902,21 +911,24 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
902 return -EBUSY; 911 return -EBUSY;
903 912
904 if (!IS_QLA84XX(ha)) { 913 if (!IS_QLA84XX(ha)) {
905 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 914 ql_dbg(ql_dbg_user, vha, 0x7032,
906 "exiting.\n", vha->host_no)); 915 "Not 84xx, exiting.\n");
907 return -EINVAL; 916 return -EINVAL;
908 } 917 }
909 918
910 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 919 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
911 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 920 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
912 if (!sg_cnt) 921 if (!sg_cnt) {
922 ql_log(ql_log_warn, vha, 0x7033,
923 "dma_map_sg returned %d for request.\n", sg_cnt);
913 return -ENOMEM; 924 return -ENOMEM;
925 }
914 926
915 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 927 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
916 DEBUG2(printk(KERN_INFO 928 ql_log(ql_log_warn, vha, 0x7034,
917 "dma mapping resulted in different sg counts " 929 "DMA mapping resulted in different sg counts, "
918 "request_sg_cnt: %x dma_request_sg_cnt: %x ", 930 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
919 bsg_job->request_payload.sg_cnt, sg_cnt)); 931 bsg_job->request_payload.sg_cnt, sg_cnt);
920 rval = -EAGAIN; 932 rval = -EAGAIN;
921 goto done_unmap_sg; 933 goto done_unmap_sg;
922 } 934 }
@@ -925,8 +937,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
925 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, 937 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
926 &fw_dma, GFP_KERNEL); 938 &fw_dma, GFP_KERNEL);
927 if (!fw_buf) { 939 if (!fw_buf) {
928 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf " 940 ql_log(ql_log_warn, vha, 0x7035,
929 "failed for host=%lu\n", __func__, vha->host_no)); 941 "DMA alloc failed for fw_buf.\n");
930 rval = -ENOMEM; 942 rval = -ENOMEM;
931 goto done_unmap_sg; 943 goto done_unmap_sg;
932 } 944 }
@@ -936,8 +948,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
936 948
937 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 949 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
938 if (!mn) { 950 if (!mn) {
939 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " 951 ql_log(ql_log_warn, vha, 0x7036,
940 "failed for host=%lu\n", __func__, vha->host_no)); 952 "DMA alloc failed for fw buffer.\n");
941 rval = -ENOMEM; 953 rval = -ENOMEM;
942 goto done_free_fw_buf; 954 goto done_free_fw_buf;
943 } 955 }
@@ -965,15 +977,15 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
965 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 977 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
966 978
967 if (rval) { 979 if (rval) {
968 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 980 ql_log(ql_log_warn, vha, 0x7037,
969 "request 84xx updatefw failed\n", vha->host_no)); 981 "Vendor request 84xx updatefw failed.\n");
970 982
971 rval = bsg_job->reply->reply_payload_rcv_len = 0; 983 rval = bsg_job->reply->reply_payload_rcv_len = 0;
972 bsg_job->reply->result = (DID_ERROR << 16); 984 bsg_job->reply->result = (DID_ERROR << 16);
973 985
974 } else { 986 } else {
975 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 987 ql_dbg(ql_dbg_user, vha, 0x7038,
976 "request 84xx updatefw completed\n", vha->host_no)); 988 "Vendor request 84xx updatefw completed.\n");
977 989
978 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 990 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
979 bsg_job->reply->result = DID_OK; 991 bsg_job->reply->result = DID_OK;
@@ -1009,27 +1021,30 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1009 1021
1010 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1022 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1011 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1023 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1012 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 1024 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1025 ql_log(ql_log_warn, vha, 0x7039,
1026 "Abort active or needed.\n");
1013 return -EBUSY; 1027 return -EBUSY;
1028 }
1014 1029
1015 if (!IS_QLA84XX(ha)) { 1030 if (!IS_QLA84XX(ha)) {
1016 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 1031 ql_log(ql_log_warn, vha, 0x703a,
1017 "exiting.\n", vha->host_no)); 1032 "Not 84xx, exiting.\n");
1018 return -EINVAL; 1033 return -EINVAL;
1019 } 1034 }
1020 1035
1021 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request + 1036 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1022 sizeof(struct fc_bsg_request)); 1037 sizeof(struct fc_bsg_request));
1023 if (!ql84_mgmt) { 1038 if (!ql84_mgmt) {
1024 DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n", 1039 ql_log(ql_log_warn, vha, 0x703b,
1025 __func__, vha->host_no)); 1040 "MGMT header not provided, exiting.\n");
1026 return -EINVAL; 1041 return -EINVAL;
1027 } 1042 }
1028 1043
1029 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1044 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1030 if (!mn) { 1045 if (!mn) {
1031 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " 1046 ql_log(ql_log_warn, vha, 0x703c,
1032 "failed for host=%lu\n", __func__, vha->host_no)); 1047 "DMA alloc failed for fw buffer.\n");
1033 return -ENOMEM; 1048 return -ENOMEM;
1034 } 1049 }
1035 1050
@@ -1044,6 +1059,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1044 bsg_job->reply_payload.sg_list, 1059 bsg_job->reply_payload.sg_list,
1045 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1060 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1046 if (!sg_cnt) { 1061 if (!sg_cnt) {
1062 ql_log(ql_log_warn, vha, 0x703d,
1063 "dma_map_sg returned %d for reply.\n", sg_cnt);
1047 rval = -ENOMEM; 1064 rval = -ENOMEM;
1048 goto exit_mgmt; 1065 goto exit_mgmt;
1049 } 1066 }
@@ -1051,10 +1068,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1051 dma_direction = DMA_FROM_DEVICE; 1068 dma_direction = DMA_FROM_DEVICE;
1052 1069
1053 if (sg_cnt != bsg_job->reply_payload.sg_cnt) { 1070 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1054 DEBUG2(printk(KERN_INFO 1071 ql_log(ql_log_warn, vha, 0x703e,
1055 "dma mapping resulted in different sg counts " 1072 "DMA mapping resulted in different sg counts, "
1056 "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n", 1073 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1057 bsg_job->reply_payload.sg_cnt, sg_cnt)); 1074 bsg_job->reply_payload.sg_cnt, sg_cnt);
1058 rval = -EAGAIN; 1075 rval = -EAGAIN;
1059 goto done_unmap_sg; 1076 goto done_unmap_sg;
1060 } 1077 }
@@ -1064,9 +1081,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1064 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1081 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1065 &mgmt_dma, GFP_KERNEL); 1082 &mgmt_dma, GFP_KERNEL);
1066 if (!mgmt_b) { 1083 if (!mgmt_b) {
1067 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " 1084 ql_log(ql_log_warn, vha, 0x703f,
1068 "failed for host=%lu\n", 1085 "DMA alloc failed for mgmt_b.\n");
1069 __func__, vha->host_no));
1070 rval = -ENOMEM; 1086 rval = -ENOMEM;
1071 goto done_unmap_sg; 1087 goto done_unmap_sg;
1072 } 1088 }
@@ -1094,6 +1110,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1094 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1110 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1095 1111
1096 if (!sg_cnt) { 1112 if (!sg_cnt) {
1113 ql_log(ql_log_warn, vha, 0x7040,
1114 "dma_map_sg returned %d.\n", sg_cnt);
1097 rval = -ENOMEM; 1115 rval = -ENOMEM;
1098 goto exit_mgmt; 1116 goto exit_mgmt;
1099 } 1117 }
@@ -1101,10 +1119,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1101 dma_direction = DMA_TO_DEVICE; 1119 dma_direction = DMA_TO_DEVICE;
1102 1120
1103 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 1121 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1104 DEBUG2(printk(KERN_INFO 1122 ql_log(ql_log_warn, vha, 0x7041,
1105 "dma mapping resulted in different sg counts " 1123 "DMA mapping resulted in different sg counts, "
1106 "request_sg_cnt: %x dma_request_sg_cnt: %x ", 1124 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1107 bsg_job->request_payload.sg_cnt, sg_cnt)); 1125 bsg_job->request_payload.sg_cnt, sg_cnt);
1108 rval = -EAGAIN; 1126 rval = -EAGAIN;
1109 goto done_unmap_sg; 1127 goto done_unmap_sg;
1110 } 1128 }
@@ -1113,9 +1131,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1113 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1131 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1114 &mgmt_dma, GFP_KERNEL); 1132 &mgmt_dma, GFP_KERNEL);
1115 if (!mgmt_b) { 1133 if (!mgmt_b) {
1116 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " 1134 ql_log(ql_log_warn, vha, 0x7042,
1117 "failed for host=%lu\n", 1135 "DMA alloc failed for mgmt_b.\n");
1118 __func__, vha->host_no));
1119 rval = -ENOMEM; 1136 rval = -ENOMEM;
1120 goto done_unmap_sg; 1137 goto done_unmap_sg;
1121 } 1138 }
@@ -1156,15 +1173,15 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1156 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); 1173 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1157 1174
1158 if (rval) { 1175 if (rval) {
1159 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 1176 ql_log(ql_log_warn, vha, 0x7043,
1160 "request 84xx mgmt failed\n", vha->host_no)); 1177 "Vendor request 84xx mgmt failed.\n");
1161 1178
1162 rval = bsg_job->reply->reply_payload_rcv_len = 0; 1179 rval = bsg_job->reply->reply_payload_rcv_len = 0;
1163 bsg_job->reply->result = (DID_ERROR << 16); 1180 bsg_job->reply->result = (DID_ERROR << 16);
1164 1181
1165 } else { 1182 } else {
1166 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 1183 ql_dbg(ql_dbg_user, vha, 0x7044,
1167 "request 84xx mgmt completed\n", vha->host_no)); 1184 "Vendor request 84xx mgmt completed.\n");
1168 1185
1169 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1186 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1170 bsg_job->reply->result = DID_OK; 1187 bsg_job->reply->result = DID_OK;
@@ -1204,7 +1221,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1204{ 1221{
1205 struct Scsi_Host *host = bsg_job->shost; 1222 struct Scsi_Host *host = bsg_job->shost;
1206 scsi_qla_host_t *vha = shost_priv(host); 1223 scsi_qla_host_t *vha = shost_priv(host);
1207 struct qla_hw_data *ha = vha->hw;
1208 int rval = 0; 1224 int rval = 0;
1209 struct qla_port_param *port_param = NULL; 1225 struct qla_port_param *port_param = NULL;
1210 fc_port_t *fcport = NULL; 1226 fc_port_t *fcport = NULL;
@@ -1215,26 +1231,27 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1215 1231
1216 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1232 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1217 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1233 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1218 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 1234 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1235 ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
1219 return -EBUSY; 1236 return -EBUSY;
1237 }
1220 1238
1221 if (!IS_IIDMA_CAPABLE(vha->hw)) { 1239 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1222 DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not " 1240 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1223 "supported\n", __func__, vha->host_no));
1224 return -EINVAL; 1241 return -EINVAL;
1225 } 1242 }
1226 1243
1227 port_param = (struct qla_port_param *)((char *)bsg_job->request + 1244 port_param = (struct qla_port_param *)((char *)bsg_job->request +
1228 sizeof(struct fc_bsg_request)); 1245 sizeof(struct fc_bsg_request));
1229 if (!port_param) { 1246 if (!port_param) {
1230 DEBUG2(printk("%s(%ld): port_param header not provided, " 1247 ql_log(ql_log_warn, vha, 0x7047,
1231 "exiting.\n", __func__, vha->host_no)); 1248 "port_param header not provided.\n");
1232 return -EINVAL; 1249 return -EINVAL;
1233 } 1250 }
1234 1251
1235 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { 1252 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1236 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n", 1253 ql_log(ql_log_warn, vha, 0x7048,
1237 __func__, vha->host_no)); 1254 "Invalid destination type.\n");
1238 return -EINVAL; 1255 return -EINVAL;
1239 } 1256 }
1240 1257
@@ -1249,21 +1266,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1249 } 1266 }
1250 1267
1251 if (!fcport) { 1268 if (!fcport) {
1252 DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n", 1269 ql_log(ql_log_warn, vha, 0x7049,
1253 __func__, vha->host_no)); 1270 "Failed to find port.\n");
1254 return -EINVAL; 1271 return -EINVAL;
1255 } 1272 }
1256 1273
1257 if (atomic_read(&fcport->state) != FCS_ONLINE) { 1274 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1258 DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n", 1275 ql_log(ql_log_warn, vha, 0x704a,
1259 __func__, vha->host_no)); 1276 "Port is not online.\n");
1260 return -EINVAL; 1277 return -EINVAL;
1261 } 1278 }
1262 1279
1263 if (fcport->flags & FCF_LOGIN_NEEDED) { 1280 if (fcport->flags & FCF_LOGIN_NEEDED) {
1264 DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, " 1281 ql_log(ql_log_warn, vha, 0x704b,
1265 "flags = 0x%x\n", 1282 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1266 __func__, vha->host_no, fcport->flags));
1267 return -EINVAL; 1283 return -EINVAL;
1268 } 1284 }
1269 1285
@@ -1275,15 +1291,13 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1275 &port_param->speed, mb); 1291 &port_param->speed, mb);
1276 1292
1277 if (rval) { 1293 if (rval) {
1278 DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for " 1294 ql_log(ql_log_warn, vha, 0x704c,
1279 "%02x%02x%02x%02x%02x%02x%02x%02x -- " 1295 "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1280 "%04x %x %04x %04x.\n", 1296 "%04x %x %04x %04x.\n", fcport->port_name[0],
1281 vha->host_no, fcport->port_name[0], 1297 fcport->port_name[1], fcport->port_name[2],
1282 fcport->port_name[1], 1298 fcport->port_name[3], fcport->port_name[4],
1283 fcport->port_name[2], fcport->port_name[3], 1299 fcport->port_name[5], fcport->port_name[6],
1284 fcport->port_name[4], fcport->port_name[5], 1300 fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1285 fcport->port_name[6], fcport->port_name[7], rval,
1286 fcport->fp_speed, mb[0], mb[1]));
1287 rval = 0; 1301 rval = 0;
1288 bsg_job->reply->result = (DID_ERROR << 16); 1302 bsg_job->reply->result = (DID_ERROR << 16);
1289 1303
@@ -1307,11 +1321,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1307} 1321}
1308 1322
1309static int 1323static int
1310qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, 1324qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1311 uint8_t is_update) 1325 uint8_t is_update)
1312{ 1326{
1313 uint32_t start = 0; 1327 uint32_t start = 0;
1314 int valid = 0; 1328 int valid = 0;
1329 struct qla_hw_data *ha = vha->hw;
1315 1330
1316 bsg_job->reply->reply_payload_rcv_len = 0; 1331 bsg_job->reply->reply_payload_rcv_len = 0;
1317 1332
@@ -1319,14 +1334,20 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1319 return -EINVAL; 1334 return -EINVAL;
1320 1335
1321 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1336 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1322 if (start > ha->optrom_size) 1337 if (start > ha->optrom_size) {
1338 ql_log(ql_log_warn, vha, 0x7055,
1339 "start %d > optrom_size %d.\n", start, ha->optrom_size);
1323 return -EINVAL; 1340 return -EINVAL;
1341 }
1324 1342
1325 if (ha->optrom_state != QLA_SWAITING) 1343 if (ha->optrom_state != QLA_SWAITING) {
1344 ql_log(ql_log_info, vha, 0x7056,
1345 "optrom_state %d.\n", ha->optrom_state);
1326 return -EBUSY; 1346 return -EBUSY;
1347 }
1327 1348
1328 ha->optrom_region_start = start; 1349 ha->optrom_region_start = start;
1329 1350 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1330 if (is_update) { 1351 if (is_update) {
1331 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 1352 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1332 valid = 1; 1353 valid = 1;
@@ -1337,9 +1358,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1337 IS_QLA8XXX_TYPE(ha)) 1358 IS_QLA8XXX_TYPE(ha))
1338 valid = 1; 1359 valid = 1;
1339 if (!valid) { 1360 if (!valid) {
1340 qla_printk(KERN_WARNING, ha, 1361 ql_log(ql_log_warn, vha, 0x7058,
1341 "Invalid start region 0x%x/0x%x.\n", 1362 "Invalid start region 0x%x/0x%x.\n", start,
1342 start, bsg_job->request_payload.payload_len); 1363 bsg_job->request_payload.payload_len);
1343 return -EINVAL; 1364 return -EINVAL;
1344 } 1365 }
1345 1366
@@ -1358,9 +1379,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1358 1379
1359 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 1380 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1360 if (!ha->optrom_buffer) { 1381 if (!ha->optrom_buffer) {
1361 qla_printk(KERN_WARNING, ha, 1382 ql_log(ql_log_warn, vha, 0x7059,
1362 "Read: Unable to allocate memory for optrom retrieval " 1383 "Read: Unable to allocate memory for optrom retrieval "
1363 "(%x).\n", ha->optrom_region_size); 1384 "(%x)\n", ha->optrom_region_size);
1364 1385
1365 ha->optrom_state = QLA_SWAITING; 1386 ha->optrom_state = QLA_SWAITING;
1366 return -ENOMEM; 1387 return -ENOMEM;
@@ -1378,7 +1399,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1378 struct qla_hw_data *ha = vha->hw; 1399 struct qla_hw_data *ha = vha->hw;
1379 int rval = 0; 1400 int rval = 0;
1380 1401
1381 rval = qla2x00_optrom_setup(bsg_job, ha, 0); 1402 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1382 if (rval) 1403 if (rval)
1383 return rval; 1404 return rval;
1384 1405
@@ -1406,7 +1427,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1406 struct qla_hw_data *ha = vha->hw; 1427 struct qla_hw_data *ha = vha->hw;
1407 int rval = 0; 1428 int rval = 0;
1408 1429
1409 rval = qla2x00_optrom_setup(bsg_job, ha, 1); 1430 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1410 if (rval) 1431 if (rval)
1411 return rval; 1432 return rval;
1412 1433
@@ -1464,6 +1485,23 @@ int
1464qla24xx_bsg_request(struct fc_bsg_job *bsg_job) 1485qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1465{ 1486{
1466 int ret = -EINVAL; 1487 int ret = -EINVAL;
1488 struct fc_rport *rport;
1489 fc_port_t *fcport = NULL;
1490 struct Scsi_Host *host;
1491 scsi_qla_host_t *vha;
1492
1493 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1494 rport = bsg_job->rport;
1495 fcport = *(fc_port_t **) rport->dd_data;
1496 host = rport_to_shost(rport);
1497 vha = shost_priv(host);
1498 } else {
1499 host = bsg_job->shost;
1500 vha = shost_priv(host);
1501 }
1502
1503 ql_dbg(ql_dbg_user, vha, 0x7000,
1504 "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
1467 1505
1468 switch (bsg_job->request->msgcode) { 1506 switch (bsg_job->request->msgcode) {
1469 case FC_BSG_RPT_ELS: 1507 case FC_BSG_RPT_ELS:
@@ -1480,7 +1518,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1480 case FC_BSG_HST_DEL_RPORT: 1518 case FC_BSG_HST_DEL_RPORT:
1481 case FC_BSG_RPT_CT: 1519 case FC_BSG_RPT_CT:
1482 default: 1520 default:
1483 DEBUG2(printk("qla2xxx: unsupported BSG request\n")); 1521 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1484 break; 1522 break;
1485 } 1523 }
1486 return ret; 1524 return ret;
@@ -1514,17 +1552,15 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1514 && (sp_bsg->u.bsg_job == bsg_job)) { 1552 && (sp_bsg->u.bsg_job == bsg_job)) {
1515 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1553 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1516 if (ha->isp_ops->abort_command(sp)) { 1554 if (ha->isp_ops->abort_command(sp)) {
1517 DEBUG2(qla_printk(KERN_INFO, ha, 1555 ql_log(ql_log_warn, vha, 0x7089,
1518 "scsi(%ld): mbx " 1556 "mbx abort_command "
1519 "abort_command failed\n", 1557 "failed.\n");
1520 vha->host_no));
1521 bsg_job->req->errors = 1558 bsg_job->req->errors =
1522 bsg_job->reply->result = -EIO; 1559 bsg_job->reply->result = -EIO;
1523 } else { 1560 } else {
1524 DEBUG2(qla_printk(KERN_INFO, ha, 1561 ql_dbg(ql_dbg_user, vha, 0x708a,
1525 "scsi(%ld): mbx " 1562 "mbx abort_command "
1526 "abort_command success\n", 1563 "success.\n");
1527 vha->host_no));
1528 bsg_job->req->errors = 1564 bsg_job->req->errors =
1529 bsg_job->reply->result = 0; 1565 bsg_job->reply->result = 0;
1530 } 1566 }
@@ -1535,8 +1571,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1535 } 1571 }
1536 } 1572 }
1537 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1573 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1538 DEBUG2(qla_printk(KERN_INFO, ha, 1574 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1539 "scsi(%ld) SRB not found to abort\n", vha->host_no));
1540 bsg_job->req->errors = bsg_job->reply->result = -ENXIO; 1575 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1541 return 0; 1576 return 0;
1542 1577
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c53719a9a747..2155071f3100 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -4,10 +4,36 @@
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7
8/*
9 * Table for showing the current message id in use for particular level
10 * Change this table for addition of log/debug messages.
11 * -----------------------------------------------------
12 * | Level | Last Value Used |
13 * -----------------------------------------------------
14 * | Module Init and Probe | 0x0116 |
15 * | Mailbox commands | 0x111e |
16 * | Device Discovery | 0x2083 |
17 * | Queue Command and IO tracing | 0x302e |
18 * | DPC Thread | 0x401c |
19 * | Async Events | 0x5059 |
20 * | Timer Routines | 0x600d |
21 * | User Space Interactions | 0x709c |
22 * | Task Management | 0x8043 |
23 * | AER/EEH | 0x900f |
24 * | Virtual Port | 0xa007 |
25 * | ISP82XX Specific | 0xb027 |
26 * | MultiQ | 0xc00b |
27 * | Misc | 0xd00b |
28 * -----------------------------------------------------
29 */
30
7#include "qla_def.h" 31#include "qla_def.h"
8 32
9#include <linux/delay.h> 33#include <linux/delay.h>
10 34
35static uint32_t ql_dbg_offset = 0x800;
36
11static inline void 37static inline void
12qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) 38qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
13{ 39{
@@ -383,11 +409,11 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
383 struct qla_hw_data *ha = vha->hw; 409 struct qla_hw_data *ha = vha->hw;
384 410
385 if (rval != QLA_SUCCESS) { 411 if (rval != QLA_SUCCESS) {
386 qla_printk(KERN_WARNING, ha, 412 ql_log(ql_log_warn, vha, 0xd000,
387 "Failed to dump firmware (%x)!!!\n", rval); 413 "Failed to dump firmware (%x).\n", rval);
388 ha->fw_dumped = 0; 414 ha->fw_dumped = 0;
389 } else { 415 } else {
390 qla_printk(KERN_INFO, ha, 416 ql_log(ql_log_info, vha, 0xd001,
391 "Firmware dump saved to temp buffer (%ld/%p).\n", 417 "Firmware dump saved to temp buffer (%ld/%p).\n",
392 vha->host_no, ha->fw_dump); 418 vha->host_no, ha->fw_dump);
393 ha->fw_dumped = 1; 419 ha->fw_dumped = 1;
@@ -419,15 +445,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
419 spin_lock_irqsave(&ha->hardware_lock, flags); 445 spin_lock_irqsave(&ha->hardware_lock, flags);
420 446
421 if (!ha->fw_dump) { 447 if (!ha->fw_dump) {
422 qla_printk(KERN_WARNING, ha, 448 ql_log(ql_log_warn, vha, 0xd002,
423 "No buffer available for dump!!!\n"); 449 "No buffer available for dump.\n");
424 goto qla2300_fw_dump_failed; 450 goto qla2300_fw_dump_failed;
425 } 451 }
426 452
427 if (ha->fw_dumped) { 453 if (ha->fw_dumped) {
428 qla_printk(KERN_WARNING, ha, 454 ql_log(ql_log_warn, vha, 0xd003,
429 "Firmware has been previously dumped (%p) -- ignoring " 455 "Firmware has been previously dumped (%p) "
430 "request...\n", ha->fw_dump); 456 "-- ignoring request.\n",
457 ha->fw_dump);
431 goto qla2300_fw_dump_failed; 458 goto qla2300_fw_dump_failed;
432 } 459 }
433 fw = &ha->fw_dump->isp.isp23; 460 fw = &ha->fw_dump->isp.isp23;
@@ -582,15 +609,16 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
582 spin_lock_irqsave(&ha->hardware_lock, flags); 609 spin_lock_irqsave(&ha->hardware_lock, flags);
583 610
584 if (!ha->fw_dump) { 611 if (!ha->fw_dump) {
585 qla_printk(KERN_WARNING, ha, 612 ql_log(ql_log_warn, vha, 0xd004,
586 "No buffer available for dump!!!\n"); 613 "No buffer available for dump.\n");
587 goto qla2100_fw_dump_failed; 614 goto qla2100_fw_dump_failed;
588 } 615 }
589 616
590 if (ha->fw_dumped) { 617 if (ha->fw_dumped) {
591 qla_printk(KERN_WARNING, ha, 618 ql_log(ql_log_warn, vha, 0xd005,
592 "Firmware has been previously dumped (%p) -- ignoring " 619 "Firmware has been previously dumped (%p) "
593 "request...\n", ha->fw_dump); 620 "-- ignoring request.\n",
621 ha->fw_dump);
594 goto qla2100_fw_dump_failed; 622 goto qla2100_fw_dump_failed;
595 } 623 }
596 fw = &ha->fw_dump->isp.isp21; 624 fw = &ha->fw_dump->isp.isp21;
@@ -779,15 +807,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
779 spin_lock_irqsave(&ha->hardware_lock, flags); 807 spin_lock_irqsave(&ha->hardware_lock, flags);
780 808
781 if (!ha->fw_dump) { 809 if (!ha->fw_dump) {
782 qla_printk(KERN_WARNING, ha, 810 ql_log(ql_log_warn, vha, 0xd006,
783 "No buffer available for dump!!!\n"); 811 "No buffer available for dump.\n");
784 goto qla24xx_fw_dump_failed; 812 goto qla24xx_fw_dump_failed;
785 } 813 }
786 814
787 if (ha->fw_dumped) { 815 if (ha->fw_dumped) {
788 qla_printk(KERN_WARNING, ha, 816 ql_log(ql_log_warn, vha, 0xd007,
789 "Firmware has been previously dumped (%p) -- ignoring " 817 "Firmware has been previously dumped (%p) "
790 "request...\n", ha->fw_dump); 818 "-- ignoring request.\n",
819 ha->fw_dump);
791 goto qla24xx_fw_dump_failed; 820 goto qla24xx_fw_dump_failed;
792 } 821 }
793 fw = &ha->fw_dump->isp.isp24; 822 fw = &ha->fw_dump->isp.isp24;
@@ -1017,15 +1046,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1017 spin_lock_irqsave(&ha->hardware_lock, flags); 1046 spin_lock_irqsave(&ha->hardware_lock, flags);
1018 1047
1019 if (!ha->fw_dump) { 1048 if (!ha->fw_dump) {
1020 qla_printk(KERN_WARNING, ha, 1049 ql_log(ql_log_warn, vha, 0xd008,
1021 "No buffer available for dump!!!\n"); 1050 "No buffer available for dump.\n");
1022 goto qla25xx_fw_dump_failed; 1051 goto qla25xx_fw_dump_failed;
1023 } 1052 }
1024 1053
1025 if (ha->fw_dumped) { 1054 if (ha->fw_dumped) {
1026 qla_printk(KERN_WARNING, ha, 1055 ql_log(ql_log_warn, vha, 0xd009,
1027 "Firmware has been previously dumped (%p) -- ignoring " 1056 "Firmware has been previously dumped (%p) "
1028 "request...\n", ha->fw_dump); 1057 "-- ignoring request.\n",
1058 ha->fw_dump);
1029 goto qla25xx_fw_dump_failed; 1059 goto qla25xx_fw_dump_failed;
1030 } 1060 }
1031 fw = &ha->fw_dump->isp.isp25; 1061 fw = &ha->fw_dump->isp.isp25;
@@ -1328,15 +1358,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1328 spin_lock_irqsave(&ha->hardware_lock, flags); 1358 spin_lock_irqsave(&ha->hardware_lock, flags);
1329 1359
1330 if (!ha->fw_dump) { 1360 if (!ha->fw_dump) {
1331 qla_printk(KERN_WARNING, ha, 1361 ql_log(ql_log_warn, vha, 0xd00a,
1332 "No buffer available for dump!!!\n"); 1362 "No buffer available for dump.\n");
1333 goto qla81xx_fw_dump_failed; 1363 goto qla81xx_fw_dump_failed;
1334 } 1364 }
1335 1365
1336 if (ha->fw_dumped) { 1366 if (ha->fw_dumped) {
1337 qla_printk(KERN_WARNING, ha, 1367 ql_log(ql_log_warn, vha, 0xd00b,
1338 "Firmware has been previously dumped (%p) -- ignoring " 1368 "Firmware has been previously dumped (%p) "
1339 "request...\n", ha->fw_dump); 1369 "-- ignoring request.\n",
1370 ha->fw_dump);
1340 goto qla81xx_fw_dump_failed; 1371 goto qla81xx_fw_dump_failed;
1341 } 1372 }
1342 fw = &ha->fw_dump->isp.isp81; 1373 fw = &ha->fw_dump->isp.isp81;
@@ -1619,106 +1650,255 @@ qla81xx_fw_dump_failed:
1619/****************************************************************************/ 1650/****************************************************************************/
1620/* Driver Debug Functions. */ 1651/* Driver Debug Functions. */
1621/****************************************************************************/ 1652/****************************************************************************/
1622 1653/*
1654 * This function is for formatting and logging debug information.
1655 * It is to be used when vha is available. It formats the message
1656 * and logs it to the messages file.
1657 * parameters:
1658 * level: The level of the debug messages to be printed.
1659 * If ql2xextended_error_logging value is correctly set,
1660 * this message will appear in the messages file.
1661 * vha: Pointer to the scsi_qla_host_t.
1662 * id: This is a unique identifier for the level. It identifies the
1663 * part of the code from where the message originated.
1664 * msg: The message to be displayed.
1665 */
1623void 1666void
1624qla2x00_dump_regs(scsi_qla_host_t *vha) 1667ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
1625{ 1668
1626 int i; 1669 char pbuf[QL_DBG_BUF_LEN];
1627 struct qla_hw_data *ha = vha->hw; 1670 va_list ap;
1628 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1671 uint32_t len;
1629 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 1672 struct pci_dev *pdev = NULL;
1630 uint16_t __iomem *mbx_reg; 1673
1674 memset(pbuf, 0, QL_DBG_BUF_LEN);
1675
1676 va_start(ap, msg);
1677
1678 if ((level & ql2xextended_error_logging) == level) {
1679 if (vha != NULL) {
1680 pdev = vha->hw->pdev;
1681 /* <module-name> <pci-name> <msg-id>:<host> Message */
1682 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
1683 dev_name(&(pdev->dev)), id + ql_dbg_offset,
1684 vha->host_no);
1685 } else
1686 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1687 "0000:00:00.0", id + ql_dbg_offset);
1688
1689 len = strlen(pbuf);
1690 vsprintf(pbuf+len, msg, ap);
1691 pr_warning("%s", pbuf);
1692 }
1631 1693
1632 mbx_reg = IS_FWI2_CAPABLE(ha) ? &reg24->mailbox0: 1694 va_end(ap);
1633 MAILBOX_REG(ha, reg, 0);
1634 1695
1635 printk("Mailbox registers:\n");
1636 for (i = 0; i < 6; i++)
1637 printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
1638 RD_REG_WORD(mbx_reg++));
1639} 1696}
1640 1697
1641 1698/*
1699 * This function is for formatting and logging debug information.
1700 * It is to be used when vha is not available and pci is availble,
1701 * i.e., before host allocation. It formats the message and logs it
1702 * to the messages file.
1703 * parameters:
1704 * level: The level of the debug messages to be printed.
1705 * If ql2xextended_error_logging value is correctly set,
1706 * this message will appear in the messages file.
1707 * pdev: Pointer to the struct pci_dev.
1708 * id: This is a unique id for the level. It identifies the part
1709 * of the code from where the message originated.
1710 * msg: The message to be displayed.
1711 */
1642void 1712void
1643qla2x00_dump_buffer(uint8_t * b, uint32_t size) 1713ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
1644{
1645 uint32_t cnt;
1646 uint8_t c;
1647 1714
1648 printk(" 0 1 2 3 4 5 6 7 8 9 " 1715 char pbuf[QL_DBG_BUF_LEN];
1649 "Ah Bh Ch Dh Eh Fh\n"); 1716 va_list ap;
1650 printk("----------------------------------------" 1717 uint32_t len;
1651 "----------------------\n"); 1718
1652 1719 if (pdev == NULL)
1653 for (cnt = 0; cnt < size;) { 1720 return;
1654 c = *b++; 1721
1655 printk("%02x",(uint32_t) c); 1722 memset(pbuf, 0, QL_DBG_BUF_LEN);
1656 cnt++; 1723
1657 if (!(cnt % 16)) 1724 va_start(ap, msg);
1658 printk("\n"); 1725
1659 else 1726 if ((level & ql2xextended_error_logging) == level) {
1660 printk(" "); 1727 /* <module-name> <dev-name>:<msg-id> Message */
1728 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1729 dev_name(&(pdev->dev)), id + ql_dbg_offset);
1730
1731 len = strlen(pbuf);
1732 vsprintf(pbuf+len, msg, ap);
1733 pr_warning("%s", pbuf);
1661 } 1734 }
1662 if (cnt % 16) 1735
1663 printk("\n"); 1736 va_end(ap);
1737
1664} 1738}
1665 1739
1740/*
1741 * This function is for formatting and logging log messages.
1742 * It is to be used when vha is available. It formats the message
1743 * and logs it to the messages file. All the messages will be logged
1744 * irrespective of value of ql2xextended_error_logging.
1745 * parameters:
1746 * level: The level of the log messages to be printed in the
1747 * messages file.
1748 * vha: Pointer to the scsi_qla_host_t
1749 * id: This is a unique id for the level. It identifies the
1750 * part of the code from where the message originated.
1751 * msg: The message to be displayed.
1752 */
1666void 1753void
1667qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size) 1754ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
1668{
1669 uint32_t cnt;
1670 uint8_t c;
1671 uint8_t last16[16], cur16[16];
1672 uint32_t lc = 0, num_same16 = 0, j;
1673 1755
1674 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 " 1756 char pbuf[QL_DBG_BUF_LEN];
1675 "Ah Bh Ch Dh Eh Fh\n"); 1757 va_list ap;
1676 printk(KERN_DEBUG "----------------------------------------" 1758 uint32_t len;
1677 "----------------------\n"); 1759 struct pci_dev *pdev = NULL;
1678 1760
1679 for (cnt = 0; cnt < size;) { 1761 memset(pbuf, 0, QL_DBG_BUF_LEN);
1680 c = *b++;
1681 1762
1682 cur16[lc++] = c; 1763 va_start(ap, msg);
1683 1764
1684 cnt++; 1765 if (level <= ql_errlev) {
1685 if (cnt % 16) 1766 if (vha != NULL) {
1686 continue; 1767 pdev = vha->hw->pdev;
1687 1768 /* <module-name> <msg-id>:<host> Message */
1688 /* We have 16 now */ 1769 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
1689 lc = 0; 1770 dev_name(&(pdev->dev)), id, vha->host_no);
1690 if (num_same16 == 0) { 1771 } else
1691 memcpy(last16, cur16, 16); 1772 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1692 num_same16++; 1773 "0000:00:00.0", id);
1693 continue; 1774
1775 len = strlen(pbuf);
1776 vsprintf(pbuf+len, msg, ap);
1777
1778 switch (level) {
1779 case 0: /* FATAL LOG */
1780 pr_crit("%s", pbuf);
1781 break;
1782 case 1:
1783 pr_err("%s", pbuf);
1784 break;
1785 case 2:
1786 pr_warn("%s", pbuf);
1787 break;
1788 default:
1789 pr_info("%s", pbuf);
1790 break;
1694 } 1791 }
1695 if (memcmp(cur16, last16, 16) == 0) { 1792 }
1696 num_same16++; 1793
1697 continue; 1794 va_end(ap);
1795}
1796
1797/*
1798 * This function is for formatting and logging log messages.
1799 * It is to be used when vha is not available and pci is availble,
1800 * i.e., before host allocation. It formats the message and logs
1801 * it to the messages file. All the messages are logged irrespective
1802 * of the value of ql2xextended_error_logging.
1803 * parameters:
1804 * level: The level of the log messages to be printed in the
1805 * messages file.
1806 * pdev: Pointer to the struct pci_dev.
1807 * id: This is a unique id for the level. It identifies the
1808 * part of the code from where the message originated.
1809 * msg: The message to be displayed.
1810 */
1811void
1812ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
1813
1814 char pbuf[QL_DBG_BUF_LEN];
1815 va_list ap;
1816 uint32_t len;
1817
1818 if (pdev == NULL)
1819 return;
1820
1821 memset(pbuf, 0, QL_DBG_BUF_LEN);
1822
1823 va_start(ap, msg);
1824
1825 if (level <= ql_errlev) {
1826 /* <module-name> <dev-name>:<msg-id> Message */
1827 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1828 dev_name(&(pdev->dev)), id);
1829
1830 len = strlen(pbuf);
1831 vsprintf(pbuf+len, msg, ap);
1832 switch (level) {
1833 case 0: /* FATAL LOG */
1834 pr_crit("%s", pbuf);
1835 break;
1836 case 1:
1837 pr_err("%s", pbuf);
1838 break;
1839 case 2:
1840 pr_warn("%s", pbuf);
1841 break;
1842 default:
1843 pr_info("%s", pbuf);
1844 break;
1698 } 1845 }
1699 for (j = 0; j < 16; j++)
1700 printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
1701 printk(KERN_DEBUG "\n");
1702
1703 if (num_same16 > 1)
1704 printk(KERN_DEBUG "> prev pattern repeats (%u)"
1705 "more times\n", num_same16-1);
1706 memcpy(last16, cur16, 16);
1707 num_same16 = 1;
1708 } 1846 }
1709 1847
1710 if (num_same16) { 1848 va_end(ap);
1711 for (j = 0; j < 16; j++) 1849}
1712 printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
1713 printk(KERN_DEBUG "\n");
1714 1850
1715 if (num_same16 > 1) 1851void
1716 printk(KERN_DEBUG "> prev pattern repeats (%u)" 1852ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
1717 "more times\n", num_same16-1); 1853{
1854 int i;
1855 struct qla_hw_data *ha = vha->hw;
1856 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1857 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1858 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1859 uint16_t __iomem *mbx_reg;
1860
1861 if ((level & ql2xextended_error_logging) == level) {
1862
1863 if (IS_QLA82XX(ha))
1864 mbx_reg = &reg82->mailbox_in[0];
1865 else if (IS_FWI2_CAPABLE(ha))
1866 mbx_reg = &reg24->mailbox0;
1867 else
1868 mbx_reg = MAILBOX_REG(ha, reg, 0);
1869
1870 ql_dbg(level, vha, id, "Mailbox registers:\n");
1871 for (i = 0; i < 6; i++)
1872 ql_dbg(level, vha, id,
1873 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
1718 } 1874 }
1719 if (lc) { 1875}
1720 for (j = 0; j < lc; j++) 1876
1721 printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]); 1877
1722 printk(KERN_DEBUG "\n"); 1878void
1879ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
1880 uint8_t *b, uint32_t size)
1881{
1882 uint32_t cnt;
1883 uint8_t c;
1884 if ((level & ql2xextended_error_logging) == level) {
1885
1886 ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
1887 "9 Ah Bh Ch Dh Eh Fh\n");
1888 ql_dbg(level, vha, id, "----------------------------------"
1889 "----------------------------\n");
1890
1891 ql_dbg(level, vha, id, "");
1892 for (cnt = 0; cnt < size;) {
1893 c = *b++;
1894 printk("%02x", (uint32_t) c);
1895 cnt++;
1896 if (!(cnt % 16))
1897 printk("\n");
1898 else
1899 printk(" ");
1900 }
1901 if (cnt % 16)
1902 ql_dbg(level, vha, id, "\n");
1723 } 1903 }
1724} 1904}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 930414541ec6..98a377b99017 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -8,146 +8,6 @@
8#include "qla_def.h" 8#include "qla_def.h"
9 9
10/* 10/*
11 * Driver debug definitions.
12 */
13/* #define QL_DEBUG_LEVEL_1 */ /* Output register accesses to COM1 */
14/* #define QL_DEBUG_LEVEL_2 */ /* Output error msgs to COM1 */
15/* #define QL_DEBUG_LEVEL_3 */ /* Output function trace msgs to COM1 */
16/* #define QL_DEBUG_LEVEL_4 */ /* Output NVRAM trace msgs to COM1 */
17/* #define QL_DEBUG_LEVEL_5 */ /* Output ring trace msgs to COM1 */
18/* #define QL_DEBUG_LEVEL_6 */ /* Output WATCHDOG timer trace to COM1 */
19/* #define QL_DEBUG_LEVEL_7 */ /* Output RISC load trace msgs to COM1 */
20/* #define QL_DEBUG_LEVEL_8 */ /* Output ring saturation msgs to COM1 */
21/* #define QL_DEBUG_LEVEL_9 */ /* Output IOCTL trace msgs */
22/* #define QL_DEBUG_LEVEL_10 */ /* Output IOCTL error msgs */
23/* #define QL_DEBUG_LEVEL_11 */ /* Output Mbx Cmd trace msgs */
24/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */
25/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
30/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
31
32/*
33* Macros use for debugging the driver.
34*/
35
36#define DEBUG(x) do { if (ql2xextended_error_logging) { x; } } while (0)
37
38#if defined(QL_DEBUG_LEVEL_1)
39#define DEBUG1(x) do {x;} while (0)
40#else
41#define DEBUG1(x) do {} while (0)
42#endif
43
44#define DEBUG2(x) do { if (ql2xextended_error_logging) { x; } } while (0)
45#define DEBUG2_3(x) do { if (ql2xextended_error_logging) { x; } } while (0)
46#define DEBUG2_3_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
47#define DEBUG2_9_10(x) do { if (ql2xextended_error_logging) { x; } } while (0)
48#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
49#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
50#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
51#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
52
53#if defined(QL_DEBUG_LEVEL_3)
54#define DEBUG3(x) do {x;} while (0)
55#define DEBUG3_11(x) do {x;} while (0)
56#else
57#define DEBUG3(x) do {} while (0)
58#endif
59
60#if defined(QL_DEBUG_LEVEL_4)
61#define DEBUG4(x) do {x;} while (0)
62#else
63#define DEBUG4(x) do {} while (0)
64#endif
65
66#if defined(QL_DEBUG_LEVEL_5)
67#define DEBUG5(x) do {x;} while (0)
68#else
69#define DEBUG5(x) do {} while (0)
70#endif
71
72#if defined(QL_DEBUG_LEVEL_7)
73#define DEBUG7(x) do {x;} while (0)
74#else
75#define DEBUG7(x) do {} while (0)
76#endif
77
78#if defined(QL_DEBUG_LEVEL_9)
79#define DEBUG9(x) do {x;} while (0)
80#define DEBUG9_10(x) do {x;} while (0)
81#else
82#define DEBUG9(x) do {} while (0)
83#endif
84
85#if defined(QL_DEBUG_LEVEL_10)
86#define DEBUG10(x) do {x;} while (0)
87#define DEBUG9_10(x) do {x;} while (0)
88#else
89#define DEBUG10(x) do {} while (0)
90 #if !defined(DEBUG9_10)
91 #define DEBUG9_10(x) do {} while (0)
92 #endif
93#endif
94
95#if defined(QL_DEBUG_LEVEL_11)
96#define DEBUG11(x) do{x;} while(0)
97#if !defined(DEBUG3_11)
98#define DEBUG3_11(x) do{x;} while(0)
99#endif
100#else
101#define DEBUG11(x) do{} while(0)
102 #if !defined(QL_DEBUG_LEVEL_3)
103 #define DEBUG3_11(x) do{} while(0)
104 #endif
105#endif
106
107#if defined(QL_DEBUG_LEVEL_12)
108#define DEBUG12(x) do {x;} while (0)
109#else
110#define DEBUG12(x) do {} while (0)
111#endif
112
113#if defined(QL_DEBUG_LEVEL_13)
114#define DEBUG13(x) do {x;} while (0)
115#else
116#define DEBUG13(x) do {} while (0)
117#endif
118
119#if defined(QL_DEBUG_LEVEL_14)
120#define DEBUG14(x) do {x;} while (0)
121#else
122#define DEBUG14(x) do {} while (0)
123#endif
124
125#if defined(QL_DEBUG_LEVEL_15)
126#define DEBUG15(x) do {x;} while (0)
127#else
128#define DEBUG15(x) do {} while (0)
129#endif
130
131#if defined(QL_DEBUG_LEVEL_16)
132#define DEBUG16(x) do {x;} while (0)
133#else
134#define DEBUG16(x) do {} while (0)
135#endif
136
137#if defined(QL_DEBUG_LEVEL_17)
138#define DEBUG17(x) do {x;} while (0)
139#else
140#define DEBUG17(x) do {} while (0)
141#endif
142
143#if defined(QL_DEBUG_LEVEL_18)
144#define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0)
145#else
146#define DEBUG18(x) do {} while (0)
147#endif
148
149
150/*
151 * Firmware Dump structure definition 11 * Firmware Dump structure definition
152 */ 12 */
153 13
@@ -370,3 +230,50 @@ struct qla2xxx_fw_dump {
370 struct qla81xx_fw_dump isp81; 230 struct qla81xx_fw_dump isp81;
371 } isp; 231 } isp;
372}; 232};
233
234#define QL_MSGHDR "qla2xxx"
235
236#define ql_log_fatal 0 /* display fatal errors */
237#define ql_log_warn 1 /* display critical errors */
238#define ql_log_info 2 /* display all recovered errors */
239#define ql_log_all 3 /* This value is only used by ql_errlev.
240 * No messages will use this value.
241 * This should be always highest value
242 * as compared to other log levels.
243 */
244
245extern int ql_errlev;
246
247void
248ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
249void
250ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
251
252void
253ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
254void
255ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
256
257/* Debug Levels */
258/* The 0x40000000 is the max value any debug level can have
259 * as ql2xextended_error_logging is of type signed int
260 */
261#define ql_dbg_init 0x40000000 /* Init Debug */
262#define ql_dbg_mbx 0x20000000 /* MBX Debug */
263#define ql_dbg_disc 0x10000000 /* Device Discovery Debug */
264#define ql_dbg_io 0x08000000 /* IO Tracing Debug */
265#define ql_dbg_dpc 0x04000000 /* DPC Thead Debug */
266#define ql_dbg_async 0x02000000 /* Async events Debug */
267#define ql_dbg_timer 0x01000000 /* Timer Debug */
268#define ql_dbg_user 0x00800000 /* User Space Interations Debug */
269#define ql_dbg_taskm 0x00400000 /* Task Management Debug */
270#define ql_dbg_aer 0x00200000 /* AER/EEH Debug */
271#define ql_dbg_multiq 0x00100000 /* MultiQ Debug */
272#define ql_dbg_p3p 0x00080000 /* P3P specific Debug */
273#define ql_dbg_vport 0x00040000 /* Virtual Port Debug */
274#define ql_dbg_buffer 0x00020000 /* For dumping the buffer/regs */
275#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
276 * not covered by upper categories
277 */
278
279#define QL_DBG_BUF_LEN 512
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a5a4e1275bf2..0b4c2b794c6f 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -64,7 +64,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
64 /* Pause tracing to flush FCE buffers. */ 64 /* Pause tracing to flush FCE buffers. */
65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd); 65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
66 if (rval) 66 if (rval)
67 qla_printk(KERN_WARNING, ha, 67 ql_dbg(ql_dbg_user, vha, 0x705c,
68 "DebugFS: Unable to disable FCE (%d).\n", rval); 68 "DebugFS: Unable to disable FCE (%d).\n", rval);
69 69
70 ha->flags.fce_enabled = 0; 70 ha->flags.fce_enabled = 0;
@@ -92,7 +92,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, 92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
93 ha->fce_mb, &ha->fce_bufs); 93 ha->fce_mb, &ha->fce_bufs);
94 if (rval) { 94 if (rval) {
95 qla_printk(KERN_WARNING, ha, 95 ql_dbg(ql_dbg_user, vha, 0x700d,
96 "DebugFS: Unable to reinitialize FCE (%d).\n", rval); 96 "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
97 ha->flags.fce_enabled = 0; 97 ha->flags.fce_enabled = 0;
98 } 98 }
@@ -125,8 +125,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
125 atomic_set(&qla2x00_dfs_root_count, 0); 125 atomic_set(&qla2x00_dfs_root_count, 0);
126 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL); 126 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
127 if (!qla2x00_dfs_root) { 127 if (!qla2x00_dfs_root) {
128 qla_printk(KERN_NOTICE, ha, 128 ql_log(ql_log_warn, vha, 0x00f7,
129 "DebugFS: Unable to create root directory.\n"); 129 "Unable to create debugfs root directory.\n");
130 goto out; 130 goto out;
131 } 131 }
132 132
@@ -137,8 +137,8 @@ create_dir:
137 mutex_init(&ha->fce_mutex); 137 mutex_init(&ha->fce_mutex);
138 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root); 138 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
139 if (!ha->dfs_dir) { 139 if (!ha->dfs_dir) {
140 qla_printk(KERN_NOTICE, ha, 140 ql_log(ql_log_warn, vha, 0x00f8,
141 "DebugFS: Unable to create ha directory.\n"); 141 "Unable to create debugfs ha directory.\n");
142 goto out; 142 goto out;
143 } 143 }
144 144
@@ -148,8 +148,8 @@ create_nodes:
148 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, 148 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
149 &dfs_fce_ops); 149 &dfs_fce_ops);
150 if (!ha->dfs_fce) { 150 if (!ha->dfs_fce) {
151 qla_printk(KERN_NOTICE, ha, 151 ql_log(ql_log_warn, vha, 0x00f9,
152 "DebugFS: Unable to fce node.\n"); 152 "Unable to create debugfs fce node.\n");
153 goto out; 153 goto out;
154 } 154 }
155out: 155out:
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b381224ae4b..29b1a3e28231 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -185,7 +185,7 @@ extern int qla24xx_start_scsi(srb_t *sp);
185int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, 185int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
186 uint16_t, uint16_t, uint8_t); 186 uint16_t, uint16_t, uint8_t);
187extern int qla2x00_start_sp(srb_t *); 187extern int qla2x00_start_sp(srb_t *);
188extern uint16_t qla24xx_calc_iocbs(uint16_t); 188extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
190extern int qla24xx_dif_start_scsi(srb_t *); 190extern int qla24xx_dif_start_scsi(srb_t *);
191 191
@@ -439,6 +439,9 @@ extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
439extern void qla2x00_dump_regs(scsi_qla_host_t *); 439extern void qla2x00_dump_regs(scsi_qla_host_t *);
440extern void qla2x00_dump_buffer(uint8_t *, uint32_t); 440extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
441extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); 441extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
442extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
443extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
444 uint8_t *, uint32_t);
442 445
443/* 446/*
444 * Global Function Prototypes in qla_gs.c source file. 447 * Global Function Prototypes in qla_gs.c source file.
@@ -478,7 +481,8 @@ extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16
478extern int qla2x00_echo_test(scsi_qla_host_t *, 481extern int qla2x00_echo_test(scsi_qla_host_t *,
479 struct msg_echo_lb *, uint16_t *); 482 struct msg_echo_lb *, uint16_t *);
480extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *); 483extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
481extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t); 484extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
485 struct qla_fcp_prio_cfg *, uint8_t);
482 486
483/* 487/*
484 * Global Function Prototypes in qla_dfs.c source file. 488 * Global Function Prototypes in qla_dfs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 8cd9066ad906..37937aa3c3b8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,11 +121,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
121 121
122 rval = QLA_FUNCTION_FAILED; 122 rval = QLA_FUNCTION_FAILED;
123 if (ms_pkt->entry_status != 0) { 123 if (ms_pkt->entry_status != 0) {
124 DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status " 124 ql_dbg(ql_dbg_disc, vha, 0x2031,
125 "(%x) on port_id: %02x%02x%02x.\n", 125 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
126 vha->host_no, routine, ms_pkt->entry_status, 126 routine, ms_pkt->entry_status, vha->d_id.b.domain,
127 vha->d_id.b.domain, vha->d_id.b.area, 127 vha->d_id.b.area, vha->d_id.b.al_pa);
128 vha->d_id.b.al_pa));
129 } else { 128 } else {
130 if (IS_FWI2_CAPABLE(ha)) 129 if (IS_FWI2_CAPABLE(ha))
131 comp_status = le16_to_cpu( 130 comp_status = le16_to_cpu(
@@ -138,24 +137,24 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
138 case CS_DATA_OVERRUN: /* Overrun? */ 137 case CS_DATA_OVERRUN: /* Overrun? */
139 if (ct_rsp->header.response != 138 if (ct_rsp->header.response !=
140 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { 139 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
141 DEBUG2_3(printk("scsi(%ld): %s failed, " 140 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
142 "rejected request on port_id: %02x%02x%02x\n", 141 "%s failed rejected request on port_id: "
143 vha->host_no, routine, 142 "%02x%02x%02x.\n", routine,
144 vha->d_id.b.domain, vha->d_id.b.area, 143 vha->d_id.b.domain, vha->d_id.b.area,
145 vha->d_id.b.al_pa)); 144 vha->d_id.b.al_pa);
146 DEBUG2_3(qla2x00_dump_buffer( 145 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
147 (uint8_t *)&ct_rsp->header, 146 0x2078, (uint8_t *)&ct_rsp->header,
148 sizeof(struct ct_rsp_hdr))); 147 sizeof(struct ct_rsp_hdr));
149 rval = QLA_INVALID_COMMAND; 148 rval = QLA_INVALID_COMMAND;
150 } else 149 } else
151 rval = QLA_SUCCESS; 150 rval = QLA_SUCCESS;
152 break; 151 break;
153 default: 152 default:
154 DEBUG2_3(printk("scsi(%ld): %s failed, completion " 153 ql_dbg(ql_dbg_disc, vha, 0x2033,
155 "status (%x) on port_id: %02x%02x%02x.\n", 154 "%s failed, completion status (%x) on port_id: "
156 vha->host_no, routine, comp_status, 155 "%02x%02x%02x.\n", routine, comp_status,
157 vha->d_id.b.domain, vha->d_id.b.area, 156 vha->d_id.b.domain, vha->d_id.b.area,
158 vha->d_id.b.al_pa)); 157 vha->d_id.b.al_pa);
159 break; 158 break;
160 } 159 }
161 } 160 }
@@ -202,8 +201,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
202 sizeof(ms_iocb_entry_t)); 201 sizeof(ms_iocb_entry_t));
203 if (rval != QLA_SUCCESS) { 202 if (rval != QLA_SUCCESS) {
204 /*EMPTY*/ 203 /*EMPTY*/
205 DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n", 204 ql_dbg(ql_dbg_disc, vha, 0x2062,
206 vha->host_no, rval)); 205 "GA_NXT issue IOCB failed (%d).\n", rval);
207 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != 206 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
208 QLA_SUCCESS) { 207 QLA_SUCCESS) {
209 rval = QLA_FUNCTION_FAILED; 208 rval = QLA_FUNCTION_FAILED;
@@ -222,11 +221,10 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
222 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) 221 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
223 fcport->d_id.b.domain = 0xf0; 222 fcport->d_id.b.domain = 0xf0;
224 223
225 DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " 224 ql_dbg(ql_dbg_disc, vha, 0x2063,
226 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 225 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
227 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 226 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
228 "portid=%02x%02x%02x.\n", 227 "port_id=%02x%02x%02x.\n",
229 vha->host_no,
230 fcport->node_name[0], fcport->node_name[1], 228 fcport->node_name[0], fcport->node_name[1],
231 fcport->node_name[2], fcport->node_name[3], 229 fcport->node_name[2], fcport->node_name[3],
232 fcport->node_name[4], fcport->node_name[5], 230 fcport->node_name[4], fcport->node_name[5],
@@ -236,7 +234,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
236 fcport->port_name[4], fcport->port_name[5], 234 fcport->port_name[4], fcport->port_name[5],
237 fcport->port_name[6], fcport->port_name[7], 235 fcport->port_name[6], fcport->port_name[7],
238 fcport->d_id.b.domain, fcport->d_id.b.area, 236 fcport->d_id.b.domain, fcport->d_id.b.area,
239 fcport->d_id.b.al_pa)); 237 fcport->d_id.b.al_pa);
240 } 238 }
241 239
242 return (rval); 240 return (rval);
@@ -287,8 +285,8 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
287 sizeof(ms_iocb_entry_t)); 285 sizeof(ms_iocb_entry_t));
288 if (rval != QLA_SUCCESS) { 286 if (rval != QLA_SUCCESS) {
289 /*EMPTY*/ 287 /*EMPTY*/
290 DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n", 288 ql_dbg(ql_dbg_disc, vha, 0x2055,
291 vha->host_no, rval)); 289 "GID_PT issue IOCB failed (%d).\n", rval);
292 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != 290 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
293 QLA_SUCCESS) { 291 QLA_SUCCESS) {
294 rval = QLA_FUNCTION_FAILED; 292 rval = QLA_FUNCTION_FAILED;
@@ -364,8 +362,8 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
364 sizeof(ms_iocb_entry_t)); 362 sizeof(ms_iocb_entry_t));
365 if (rval != QLA_SUCCESS) { 363 if (rval != QLA_SUCCESS) {
366 /*EMPTY*/ 364 /*EMPTY*/
367 DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed " 365 ql_dbg(ql_dbg_disc, vha, 0x2056,
368 "(%d).\n", vha->host_no, rval)); 366 "GPN_ID issue IOCB failed (%d).\n", rval);
369 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 367 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
370 "GPN_ID") != QLA_SUCCESS) { 368 "GPN_ID") != QLA_SUCCESS) {
371 rval = QLA_FUNCTION_FAILED; 369 rval = QLA_FUNCTION_FAILED;
@@ -424,8 +422,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
424 sizeof(ms_iocb_entry_t)); 422 sizeof(ms_iocb_entry_t));
425 if (rval != QLA_SUCCESS) { 423 if (rval != QLA_SUCCESS) {
426 /*EMPTY*/ 424 /*EMPTY*/
427 DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed " 425 ql_dbg(ql_dbg_disc, vha, 0x2057,
428 "(%d).\n", vha->host_no, rval)); 426 "GNN_ID issue IOCB failed (%d).\n", rval);
429 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 427 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
430 "GNN_ID") != QLA_SUCCESS) { 428 "GNN_ID") != QLA_SUCCESS) {
431 rval = QLA_FUNCTION_FAILED; 429 rval = QLA_FUNCTION_FAILED;
@@ -434,11 +432,10 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
434 memcpy(list[i].node_name, 432 memcpy(list[i].node_name,
435 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); 433 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
436 434
437 DEBUG2_3(printk("scsi(%ld): GID_PT entry - " 435 ql_dbg(ql_dbg_disc, vha, 0x2058,
438 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 436 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
439 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 437 "pn %02x%02x%02x%02x%02x%02x%02X%02x "
440 "portid=%02x%02x%02x.\n", 438 "portid=%02x%02x%02x.\n",
441 vha->host_no,
442 list[i].node_name[0], list[i].node_name[1], 439 list[i].node_name[0], list[i].node_name[1],
443 list[i].node_name[2], list[i].node_name[3], 440 list[i].node_name[2], list[i].node_name[3],
444 list[i].node_name[4], list[i].node_name[5], 441 list[i].node_name[4], list[i].node_name[5],
@@ -448,7 +445,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
448 list[i].port_name[4], list[i].port_name[5], 445 list[i].port_name[4], list[i].port_name[5],
449 list[i].port_name[6], list[i].port_name[7], 446 list[i].port_name[6], list[i].port_name[7],
450 list[i].d_id.b.domain, list[i].d_id.b.area, 447 list[i].d_id.b.domain, list[i].d_id.b.area,
451 list[i].d_id.b.al_pa)); 448 list[i].d_id.b.al_pa);
452 } 449 }
453 450
454 /* Last device exit. */ 451 /* Last device exit. */
@@ -499,14 +496,14 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
499 sizeof(ms_iocb_entry_t)); 496 sizeof(ms_iocb_entry_t));
500 if (rval != QLA_SUCCESS) { 497 if (rval != QLA_SUCCESS) {
501 /*EMPTY*/ 498 /*EMPTY*/
502 DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n", 499 ql_dbg(ql_dbg_disc, vha, 0x2043,
503 vha->host_no, rval)); 500 "RFT_ID issue IOCB failed (%d).\n", rval);
504 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") != 501 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
505 QLA_SUCCESS) { 502 QLA_SUCCESS) {
506 rval = QLA_FUNCTION_FAILED; 503 rval = QLA_FUNCTION_FAILED;
507 } else { 504 } else {
508 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 505 ql_dbg(ql_dbg_disc, vha, 0x2044,
509 vha->host_no)); 506 "RFT_ID exiting normally.\n");
510 } 507 }
511 508
512 return (rval); 509 return (rval);
@@ -528,8 +525,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
528 struct ct_sns_rsp *ct_rsp; 525 struct ct_sns_rsp *ct_rsp;
529 526
530 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 527 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
531 DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on " 528 ql_dbg(ql_dbg_disc, vha, 0x2046,
532 "ISP2100/ISP2200.\n", vha->host_no)); 529 "RFF_ID call not supported on ISP2100/ISP2200.\n");
533 return (QLA_SUCCESS); 530 return (QLA_SUCCESS);
534 } 531 }
535 532
@@ -556,14 +553,14 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
556 sizeof(ms_iocb_entry_t)); 553 sizeof(ms_iocb_entry_t));
557 if (rval != QLA_SUCCESS) { 554 if (rval != QLA_SUCCESS) {
558 /*EMPTY*/ 555 /*EMPTY*/
559 DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n", 556 ql_dbg(ql_dbg_disc, vha, 0x2047,
560 vha->host_no, rval)); 557 "RFF_ID issue IOCB failed (%d).\n", rval);
561 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") != 558 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
562 QLA_SUCCESS) { 559 QLA_SUCCESS) {
563 rval = QLA_FUNCTION_FAILED; 560 rval = QLA_FUNCTION_FAILED;
564 } else { 561 } else {
565 DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n", 562 ql_dbg(ql_dbg_disc, vha, 0x2048,
566 vha->host_no)); 563 "RFF_ID exiting normally.\n");
567 } 564 }
568 565
569 return (rval); 566 return (rval);
@@ -609,14 +606,14 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
609 sizeof(ms_iocb_entry_t)); 606 sizeof(ms_iocb_entry_t));
610 if (rval != QLA_SUCCESS) { 607 if (rval != QLA_SUCCESS) {
611 /*EMPTY*/ 608 /*EMPTY*/
612 DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n", 609 ql_dbg(ql_dbg_disc, vha, 0x204d,
613 vha->host_no, rval)); 610 "RNN_ID issue IOCB failed (%d).\n", rval);
614 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") != 611 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
615 QLA_SUCCESS) { 612 QLA_SUCCESS) {
616 rval = QLA_FUNCTION_FAILED; 613 rval = QLA_FUNCTION_FAILED;
617 } else { 614 } else {
618 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 615 ql_dbg(ql_dbg_disc, vha, 0x204e,
619 vha->host_no)); 616 "RNN_ID exiting normally.\n");
620 } 617 }
621 618
622 return (rval); 619 return (rval);
@@ -647,8 +644,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
647 struct ct_sns_rsp *ct_rsp; 644 struct ct_sns_rsp *ct_rsp;
648 645
649 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 646 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
650 DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on " 647 ql_dbg(ql_dbg_disc, vha, 0x2050,
651 "ISP2100/ISP2200.\n", vha->host_no)); 648 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
652 return (QLA_SUCCESS); 649 return (QLA_SUCCESS);
653 } 650 }
654 651
@@ -682,14 +679,14 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
682 sizeof(ms_iocb_entry_t)); 679 sizeof(ms_iocb_entry_t));
683 if (rval != QLA_SUCCESS) { 680 if (rval != QLA_SUCCESS) {
684 /*EMPTY*/ 681 /*EMPTY*/
685 DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n", 682 ql_dbg(ql_dbg_disc, vha, 0x2051,
686 vha->host_no, rval)); 683 "RSNN_NN issue IOCB failed (%d).\n", rval);
687 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") != 684 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
688 QLA_SUCCESS) { 685 QLA_SUCCESS) {
689 rval = QLA_FUNCTION_FAILED; 686 rval = QLA_FUNCTION_FAILED;
690 } else { 687 } else {
691 DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n", 688 ql_dbg(ql_dbg_disc, vha, 0x2052,
692 vha->host_no)); 689 "RSNN_NN exiting normally.\n");
693 } 690 }
694 691
695 return (rval); 692 return (rval);
@@ -757,13 +754,14 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
757 sizeof(struct sns_cmd_pkt)); 754 sizeof(struct sns_cmd_pkt));
758 if (rval != QLA_SUCCESS) { 755 if (rval != QLA_SUCCESS) {
759 /*EMPTY*/ 756 /*EMPTY*/
760 DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n", 757 ql_dbg(ql_dbg_disc, vha, 0x205f,
761 vha->host_no, rval)); 758 "GA_NXT Send SNS failed (%d).\n", rval);
762 } else if (sns_cmd->p.gan_data[8] != 0x80 || 759 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
763 sns_cmd->p.gan_data[9] != 0x02) { 760 sns_cmd->p.gan_data[9] != 0x02) {
764 DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, " 761 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
765 "ga_nxt_rsp:\n", vha->host_no)); 762 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
766 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16)); 763 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
764 sns_cmd->p.gan_data, 16);
767 rval = QLA_FUNCTION_FAILED; 765 rval = QLA_FUNCTION_FAILED;
768 } else { 766 } else {
769 /* Populate fc_port_t entry. */ 767 /* Populate fc_port_t entry. */
@@ -778,11 +776,10 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
778 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE) 776 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
779 fcport->d_id.b.domain = 0xf0; 777 fcport->d_id.b.domain = 0xf0;
780 778
781 DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " 779 ql_dbg(ql_dbg_disc, vha, 0x2061,
782 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 780 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
783 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 781 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
784 "portid=%02x%02x%02x.\n", 782 "port_id=%02x%02x%02x.\n",
785 vha->host_no,
786 fcport->node_name[0], fcport->node_name[1], 783 fcport->node_name[0], fcport->node_name[1],
787 fcport->node_name[2], fcport->node_name[3], 784 fcport->node_name[2], fcport->node_name[3],
788 fcport->node_name[4], fcport->node_name[5], 785 fcport->node_name[4], fcport->node_name[5],
@@ -792,7 +789,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
792 fcport->port_name[4], fcport->port_name[5], 789 fcport->port_name[4], fcport->port_name[5],
793 fcport->port_name[6], fcport->port_name[7], 790 fcport->port_name[6], fcport->port_name[7],
794 fcport->d_id.b.domain, fcport->d_id.b.area, 791 fcport->d_id.b.domain, fcport->d_id.b.area,
795 fcport->d_id.b.al_pa)); 792 fcport->d_id.b.al_pa);
796 } 793 }
797 794
798 return (rval); 795 return (rval);
@@ -831,13 +828,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
831 sizeof(struct sns_cmd_pkt)); 828 sizeof(struct sns_cmd_pkt));
832 if (rval != QLA_SUCCESS) { 829 if (rval != QLA_SUCCESS) {
833 /*EMPTY*/ 830 /*EMPTY*/
834 DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n", 831 ql_dbg(ql_dbg_disc, vha, 0x206d,
835 vha->host_no, rval)); 832 "GID_PT Send SNS failed (%d).\n", rval);
836 } else if (sns_cmd->p.gid_data[8] != 0x80 || 833 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
837 sns_cmd->p.gid_data[9] != 0x02) { 834 sns_cmd->p.gid_data[9] != 0x02) {
838 DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, " 835 ql_dbg(ql_dbg_disc, vha, 0x202f,
839 "gid_rsp:\n", vha->host_no)); 836 "GID_PT failed, rejected request, gid_rsp:\n");
840 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16)); 837 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
838 sns_cmd->p.gid_data, 16);
841 rval = QLA_FUNCTION_FAILED; 839 rval = QLA_FUNCTION_FAILED;
842 } else { 840 } else {
843 /* Set port IDs in switch info list. */ 841 /* Set port IDs in switch info list. */
@@ -900,13 +898,14 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
900 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 898 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
901 if (rval != QLA_SUCCESS) { 899 if (rval != QLA_SUCCESS) {
902 /*EMPTY*/ 900 /*EMPTY*/
903 DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed " 901 ql_dbg(ql_dbg_disc, vha, 0x2032,
904 "(%d).\n", vha->host_no, rval)); 902 "GPN_ID Send SNS failed (%d).\n", rval);
905 } else if (sns_cmd->p.gpn_data[8] != 0x80 || 903 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
906 sns_cmd->p.gpn_data[9] != 0x02) { 904 sns_cmd->p.gpn_data[9] != 0x02) {
907 DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected " 905 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
908 "request, gpn_rsp:\n", vha->host_no)); 906 "GPN_ID failed, rejected request, gpn_rsp:\n");
909 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16)); 907 ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
908 sns_cmd->p.gpn_data, 16);
910 rval = QLA_FUNCTION_FAILED; 909 rval = QLA_FUNCTION_FAILED;
911 } else { 910 } else {
912 /* Save portname */ 911 /* Save portname */
@@ -955,24 +954,24 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
955 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 954 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
956 if (rval != QLA_SUCCESS) { 955 if (rval != QLA_SUCCESS) {
957 /*EMPTY*/ 956 /*EMPTY*/
958 DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed " 957 ql_dbg(ql_dbg_disc, vha, 0x203f,
959 "(%d).\n", vha->host_no, rval)); 958 "GNN_ID Send SNS failed (%d).\n", rval);
960 } else if (sns_cmd->p.gnn_data[8] != 0x80 || 959 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
961 sns_cmd->p.gnn_data[9] != 0x02) { 960 sns_cmd->p.gnn_data[9] != 0x02) {
962 DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected " 961 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
963 "request, gnn_rsp:\n", vha->host_no)); 962 "GNN_ID failed, rejected request, gnn_rsp:\n");
964 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16)); 963 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
964 sns_cmd->p.gnn_data, 16);
965 rval = QLA_FUNCTION_FAILED; 965 rval = QLA_FUNCTION_FAILED;
966 } else { 966 } else {
967 /* Save nodename */ 967 /* Save nodename */
968 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16], 968 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
969 WWN_SIZE); 969 WWN_SIZE);
970 970
971 DEBUG2_3(printk("scsi(%ld): GID_PT entry - " 971 ql_dbg(ql_dbg_disc, vha, 0x206e,
972 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 972 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
973 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 973 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
974 "portid=%02x%02x%02x.\n", 974 "port_id=%02x%02x%02x.\n",
975 vha->host_no,
976 list[i].node_name[0], list[i].node_name[1], 975 list[i].node_name[0], list[i].node_name[1],
977 list[i].node_name[2], list[i].node_name[3], 976 list[i].node_name[2], list[i].node_name[3],
978 list[i].node_name[4], list[i].node_name[5], 977 list[i].node_name[4], list[i].node_name[5],
@@ -982,7 +981,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
982 list[i].port_name[4], list[i].port_name[5], 981 list[i].port_name[4], list[i].port_name[5],
983 list[i].port_name[6], list[i].port_name[7], 982 list[i].port_name[6], list[i].port_name[7],
984 list[i].d_id.b.domain, list[i].d_id.b.area, 983 list[i].d_id.b.domain, list[i].d_id.b.area,
985 list[i].d_id.b.al_pa)); 984 list[i].d_id.b.al_pa);
986 } 985 }
987 986
988 /* Last device exit. */ 987 /* Last device exit. */
@@ -1025,17 +1024,18 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1025 sizeof(struct sns_cmd_pkt)); 1024 sizeof(struct sns_cmd_pkt));
1026 if (rval != QLA_SUCCESS) { 1025 if (rval != QLA_SUCCESS) {
1027 /*EMPTY*/ 1026 /*EMPTY*/
1028 DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n", 1027 ql_dbg(ql_dbg_disc, vha, 0x2060,
1029 vha->host_no, rval)); 1028 "RFT_ID Send SNS failed (%d).\n", rval);
1030 } else if (sns_cmd->p.rft_data[8] != 0x80 || 1029 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1031 sns_cmd->p.rft_data[9] != 0x02) { 1030 sns_cmd->p.rft_data[9] != 0x02) {
1032 DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, " 1031 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1033 "rft_rsp:\n", vha->host_no)); 1032 "RFT_ID failed, rejected request rft_rsp:\n");
1034 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16)); 1033 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1034 sns_cmd->p.rft_data, 16);
1035 rval = QLA_FUNCTION_FAILED; 1035 rval = QLA_FUNCTION_FAILED;
1036 } else { 1036 } else {
1037 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 1037 ql_dbg(ql_dbg_disc, vha, 0x2073,
1038 vha->host_no)); 1038 "RFT_ID exiting normally.\n");
1039 } 1039 }
1040 1040
1041 return (rval); 1041 return (rval);
@@ -1081,17 +1081,18 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1081 sizeof(struct sns_cmd_pkt)); 1081 sizeof(struct sns_cmd_pkt));
1082 if (rval != QLA_SUCCESS) { 1082 if (rval != QLA_SUCCESS) {
1083 /*EMPTY*/ 1083 /*EMPTY*/
1084 DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n", 1084 ql_dbg(ql_dbg_disc, vha, 0x204a,
1085 vha->host_no, rval)); 1085 "RNN_ID Send SNS failed (%d).\n", rval);
1086 } else if (sns_cmd->p.rnn_data[8] != 0x80 || 1086 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1087 sns_cmd->p.rnn_data[9] != 0x02) { 1087 sns_cmd->p.rnn_data[9] != 0x02) {
1088 DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, " 1088 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1089 "rnn_rsp:\n", vha->host_no)); 1089 "RNN_ID failed, rejected request, rnn_rsp:\n");
1090 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16)); 1090 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1091 sns_cmd->p.rnn_data, 16);
1091 rval = QLA_FUNCTION_FAILED; 1092 rval = QLA_FUNCTION_FAILED;
1092 } else { 1093 } else {
1093 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 1094 ql_dbg(ql_dbg_disc, vha, 0x204c,
1094 vha->host_no)); 1095 "RNN_ID exiting normally.\n");
1095 } 1096 }
1096 1097
1097 return (rval); 1098 return (rval);
@@ -1116,10 +1117,10 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1116 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1117 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1117 mb, BIT_1|BIT_0); 1118 mb, BIT_1|BIT_0);
1118 if (mb[0] != MBS_COMMAND_COMPLETE) { 1119 if (mb[0] != MBS_COMMAND_COMPLETE) {
1119 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1120 ql_dbg(ql_dbg_disc, vha, 0x2024,
1120 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1121 "Failed management_server login: loopid=%x mb[0]=%x "
1121 __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1], 1122 "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1122 mb[2], mb[6], mb[7])); 1123 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
1123 ret = QLA_FUNCTION_FAILED; 1124 ret = QLA_FUNCTION_FAILED;
1124 } else 1125 } else
1125 vha->flags.management_server_logged_in = 1; 1126 vha->flags.management_server_logged_in = 1;
@@ -1292,11 +1293,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1292 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); 1293 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1293 size += 4 + WWN_SIZE; 1294 size += 4 + WWN_SIZE;
1294 1295
1295 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n", 1296 ql_dbg(ql_dbg_disc, vha, 0x2025,
1296 __func__, vha->host_no, 1297 "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
1297 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2], 1298 eiter->a.node_name[0], eiter->a.node_name[1],
1298 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5], 1299 eiter->a.node_name[2], eiter->a.node_name[3],
1299 eiter->a.node_name[6], eiter->a.node_name[7])); 1300 eiter->a.node_name[4], eiter->a.node_name[5],
1301 eiter->a.node_name[6], eiter->a.node_name[7]);
1300 1302
1301 /* Manufacturer. */ 1303 /* Manufacturer. */
1302 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1304 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1307,8 +1309,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1307 eiter->len = cpu_to_be16(4 + alen); 1309 eiter->len = cpu_to_be16(4 + alen);
1308 size += 4 + alen; 1310 size += 4 + alen;
1309 1311
1310 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no, 1312 ql_dbg(ql_dbg_disc, vha, 0x2026,
1311 eiter->a.manufacturer)); 1313 "Manufacturer = %s.\n", eiter->a.manufacturer);
1312 1314
1313 /* Serial number. */ 1315 /* Serial number. */
1314 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1316 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1320,8 +1322,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1320 eiter->len = cpu_to_be16(4 + alen); 1322 eiter->len = cpu_to_be16(4 + alen);
1321 size += 4 + alen; 1323 size += 4 + alen;
1322 1324
1323 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no, 1325 ql_dbg(ql_dbg_disc, vha, 0x2027,
1324 eiter->a.serial_num)); 1326 "Serial no. = %s.\n", eiter->a.serial_num);
1325 1327
1326 /* Model name. */ 1328 /* Model name. */
1327 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1329 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1332,8 +1334,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1332 eiter->len = cpu_to_be16(4 + alen); 1334 eiter->len = cpu_to_be16(4 + alen);
1333 size += 4 + alen; 1335 size += 4 + alen;
1334 1336
1335 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no, 1337 ql_dbg(ql_dbg_disc, vha, 0x2028,
1336 eiter->a.model)); 1338 "Model Name = %s.\n", eiter->a.model);
1337 1339
1338 /* Model description. */ 1340 /* Model description. */
1339 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1341 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1345,8 +1347,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1345 eiter->len = cpu_to_be16(4 + alen); 1347 eiter->len = cpu_to_be16(4 + alen);
1346 size += 4 + alen; 1348 size += 4 + alen;
1347 1349
1348 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no, 1350 ql_dbg(ql_dbg_disc, vha, 0x2029,
1349 eiter->a.model_desc)); 1351 "Model Desc = %s.\n", eiter->a.model_desc);
1350 1352
1351 /* Hardware version. */ 1353 /* Hardware version. */
1352 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1354 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1357,8 +1359,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1357 eiter->len = cpu_to_be16(4 + alen); 1359 eiter->len = cpu_to_be16(4 + alen);
1358 size += 4 + alen; 1360 size += 4 + alen;
1359 1361
1360 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no, 1362 ql_dbg(ql_dbg_disc, vha, 0x202a,
1361 eiter->a.hw_version)); 1363 "Hardware ver = %s.\n", eiter->a.hw_version);
1362 1364
1363 /* Driver version. */ 1365 /* Driver version. */
1364 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1366 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1369,8 +1371,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1369 eiter->len = cpu_to_be16(4 + alen); 1371 eiter->len = cpu_to_be16(4 + alen);
1370 size += 4 + alen; 1372 size += 4 + alen;
1371 1373
1372 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no, 1374 ql_dbg(ql_dbg_disc, vha, 0x202b,
1373 eiter->a.driver_version)); 1375 "Driver ver = %s.\n", eiter->a.driver_version);
1374 1376
1375 /* Option ROM version. */ 1377 /* Option ROM version. */
1376 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1378 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1381,8 +1383,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1381 eiter->len = cpu_to_be16(4 + alen); 1383 eiter->len = cpu_to_be16(4 + alen);
1382 size += 4 + alen; 1384 size += 4 + alen;
1383 1385
1384 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no, 1386 ql_dbg(ql_dbg_disc, vha , 0x202c,
1385 eiter->a.orom_version)); 1387 "Optrom vers = %s.\n", eiter->a.orom_version);
1386 1388
1387 /* Firmware version */ 1389 /* Firmware version */
1388 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1390 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1393,44 +1395,46 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1393 eiter->len = cpu_to_be16(4 + alen); 1395 eiter->len = cpu_to_be16(4 + alen);
1394 size += 4 + alen; 1396 size += 4 + alen;
1395 1397
1396 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no, 1398 ql_dbg(ql_dbg_disc, vha, 0x202d,
1397 eiter->a.fw_version)); 1399 "Firmware vers = %s.\n", eiter->a.fw_version);
1398 1400
1399 /* Update MS request size. */ 1401 /* Update MS request size. */
1400 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1402 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1401 1403
1402 DEBUG13(printk("%s(%ld): RHBA identifier=" 1404 ql_dbg(ql_dbg_disc, vha, 0x202e,
1403 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1405 "RHBA identifier = "
1404 vha->host_no, ct_req->req.rhba.hba_identifier[0], 1406 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
1407 ct_req->req.rhba.hba_identifier[0],
1405 ct_req->req.rhba.hba_identifier[1], 1408 ct_req->req.rhba.hba_identifier[1],
1406 ct_req->req.rhba.hba_identifier[2], 1409 ct_req->req.rhba.hba_identifier[2],
1407 ct_req->req.rhba.hba_identifier[3], 1410 ct_req->req.rhba.hba_identifier[3],
1408 ct_req->req.rhba.hba_identifier[4], 1411 ct_req->req.rhba.hba_identifier[4],
1409 ct_req->req.rhba.hba_identifier[5], 1412 ct_req->req.rhba.hba_identifier[5],
1410 ct_req->req.rhba.hba_identifier[6], 1413 ct_req->req.rhba.hba_identifier[6],
1411 ct_req->req.rhba.hba_identifier[7], size)); 1414 ct_req->req.rhba.hba_identifier[7], size);
1412 DEBUG13(qla2x00_dump_buffer(entries, size)); 1415 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1416 entries, size);
1413 1417
1414 /* Execute MS IOCB */ 1418 /* Execute MS IOCB */
1415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1419 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1416 sizeof(ms_iocb_entry_t)); 1420 sizeof(ms_iocb_entry_t));
1417 if (rval != QLA_SUCCESS) { 1421 if (rval != QLA_SUCCESS) {
1418 /*EMPTY*/ 1422 /*EMPTY*/
1419 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n", 1423 ql_dbg(ql_dbg_disc, vha, 0x2030,
1420 vha->host_no, rval)); 1424 "RHBA issue IOCB failed (%d).\n", rval);
1421 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") != 1425 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1422 QLA_SUCCESS) { 1426 QLA_SUCCESS) {
1423 rval = QLA_FUNCTION_FAILED; 1427 rval = QLA_FUNCTION_FAILED;
1424 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 1428 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1425 ct_rsp->header.explanation_code == 1429 ct_rsp->header.explanation_code ==
1426 CT_EXPL_ALREADY_REGISTERED) { 1430 CT_EXPL_ALREADY_REGISTERED) {
1427 DEBUG2_13(printk("%s(%ld): HBA already registered.\n", 1431 ql_dbg(ql_dbg_disc, vha, 0x2034,
1428 __func__, vha->host_no)); 1432 "HBA already registered.\n");
1429 rval = QLA_ALREADY_REGISTERED; 1433 rval = QLA_ALREADY_REGISTERED;
1430 } 1434 }
1431 } else { 1435 } else {
1432 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n", 1436 ql_dbg(ql_dbg_disc, vha, 0x2035,
1433 vha->host_no)); 1437 "RHBA exiting normally.\n");
1434 } 1438 }
1435 1439
1436 return rval; 1440 return rval;
@@ -1464,26 +1468,26 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
1464 /* Prepare FDMI command arguments -- portname. */ 1468 /* Prepare FDMI command arguments -- portname. */
1465 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); 1469 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
1466 1470
1467 DEBUG13(printk("%s(%ld): DHBA portname=" 1471 ql_dbg(ql_dbg_disc, vha, 0x2036,
1468 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no, 1472 "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
1469 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1], 1473 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
1470 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3], 1474 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
1471 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5], 1475 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
1472 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7])); 1476 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
1473 1477
1474 /* Execute MS IOCB */ 1478 /* Execute MS IOCB */
1475 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1479 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1476 sizeof(ms_iocb_entry_t)); 1480 sizeof(ms_iocb_entry_t));
1477 if (rval != QLA_SUCCESS) { 1481 if (rval != QLA_SUCCESS) {
1478 /*EMPTY*/ 1482 /*EMPTY*/
1479 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n", 1483 ql_dbg(ql_dbg_disc, vha, 0x2037,
1480 vha->host_no, rval)); 1484 "DHBA issue IOCB failed (%d).\n", rval);
1481 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != 1485 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
1482 QLA_SUCCESS) { 1486 QLA_SUCCESS) {
1483 rval = QLA_FUNCTION_FAILED; 1487 rval = QLA_FUNCTION_FAILED;
1484 } else { 1488 } else {
1485 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n", 1489 ql_dbg(ql_dbg_disc, vha, 0x2038,
1486 vha->host_no)); 1490 "DHBA exiting normally.\n");
1487 } 1491 }
1488 1492
1489 return rval; 1493 return rval;
@@ -1534,9 +1538,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1534 eiter->a.fc4_types[2] = 0x01; 1538 eiter->a.fc4_types[2] = 0x01;
1535 size += 4 + 32; 1539 size += 4 + 32;
1536 1540
1537 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, 1541 ql_dbg(ql_dbg_disc, vha, 0x2039,
1538 vha->host_no, eiter->a.fc4_types[2], 1542 "FC4_TYPES=%02x %02x.\n",
1539 eiter->a.fc4_types[1])); 1543 eiter->a.fc4_types[2],
1544 eiter->a.fc4_types[1]);
1540 1545
1541 /* Supported speed. */ 1546 /* Supported speed. */
1542 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1547 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1561,8 +1566,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1561 FDMI_PORT_SPEED_1GB); 1566 FDMI_PORT_SPEED_1GB);
1562 size += 4 + 4; 1567 size += 4 + 4;
1563 1568
1564 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no, 1569 ql_dbg(ql_dbg_disc, vha, 0x203a,
1565 eiter->a.sup_speed)); 1570 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1566 1571
1567 /* Current speed. */ 1572 /* Current speed. */
1568 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1573 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1596,8 +1601,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1596 } 1601 }
1597 size += 4 + 4; 1602 size += 4 + 4;
1598 1603
1599 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no, 1604 ql_dbg(ql_dbg_disc, vha, 0x203b,
1600 eiter->a.cur_speed)); 1605 "Current_Speed=%x.\n", eiter->a.cur_speed);
1601 1606
1602 /* Max frame size. */ 1607 /* Max frame size. */
1603 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1608 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1609,8 +1614,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1609 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1614 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1610 size += 4 + 4; 1615 size += 4 + 4;
1611 1616
1612 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no, 1617 ql_dbg(ql_dbg_disc, vha, 0x203c,
1613 eiter->a.max_frame_size)); 1618 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1614 1619
1615 /* OS device name. */ 1620 /* OS device name. */
1616 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1621 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1621,8 +1626,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1621 eiter->len = cpu_to_be16(4 + alen); 1626 eiter->len = cpu_to_be16(4 + alen);
1622 size += 4 + alen; 1627 size += 4 + alen;
1623 1628
1624 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no, 1629 ql_dbg(ql_dbg_disc, vha, 0x204b,
1625 eiter->a.os_dev_name)); 1630 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1626 1631
1627 /* Hostname. */ 1632 /* Hostname. */
1628 if (strlen(fc_host_system_hostname(vha->host))) { 1633 if (strlen(fc_host_system_hostname(vha->host))) {
@@ -1637,35 +1642,36 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1637 eiter->len = cpu_to_be16(4 + alen); 1642 eiter->len = cpu_to_be16(4 + alen);
1638 size += 4 + alen; 1643 size += 4 + alen;
1639 1644
1640 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__, 1645 ql_dbg(ql_dbg_disc, vha, 0x203d,
1641 vha->host_no, eiter->a.host_name)); 1646 "HostName=%s.\n", eiter->a.host_name);
1642 } 1647 }
1643 1648
1644 /* Update MS request size. */ 1649 /* Update MS request size. */
1645 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1650 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1646 1651
1647 DEBUG13(printk("%s(%ld): RPA portname=" 1652 ql_dbg(ql_dbg_disc, vha, 0x203e,
1648 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1653 "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
1649 vha->host_no, ct_req->req.rpa.port_name[0], 1654 ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
1650 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2], 1655 ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
1651 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4], 1656 ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
1652 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6], 1657 ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
1653 ct_req->req.rpa.port_name[7], size)); 1658 size);
1654 DEBUG13(qla2x00_dump_buffer(entries, size)); 1659 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1660 entries, size);
1655 1661
1656 /* Execute MS IOCB */ 1662 /* Execute MS IOCB */
1657 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1663 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1658 sizeof(ms_iocb_entry_t)); 1664 sizeof(ms_iocb_entry_t));
1659 if (rval != QLA_SUCCESS) { 1665 if (rval != QLA_SUCCESS) {
1660 /*EMPTY*/ 1666 /*EMPTY*/
1661 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n", 1667 ql_dbg(ql_dbg_disc, vha, 0x2040,
1662 vha->host_no, rval)); 1668 "RPA issue IOCB failed (%d).\n", rval);
1663 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") != 1669 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1664 QLA_SUCCESS) { 1670 QLA_SUCCESS) {
1665 rval = QLA_FUNCTION_FAILED; 1671 rval = QLA_FUNCTION_FAILED;
1666 } else { 1672 } else {
1667 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n", 1673 ql_dbg(ql_dbg_disc, vha, 0x2041,
1668 vha->host_no)); 1674 "RPA exiting nornally.\n");
1669 } 1675 }
1670 1676
1671 return rval; 1677 return rval;
@@ -1749,8 +1755,8 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1749 sizeof(ms_iocb_entry_t)); 1755 sizeof(ms_iocb_entry_t));
1750 if (rval != QLA_SUCCESS) { 1756 if (rval != QLA_SUCCESS) {
1751 /*EMPTY*/ 1757 /*EMPTY*/
1752 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB " 1758 ql_dbg(ql_dbg_disc, vha, 0x2023,
1753 "failed (%d).\n", vha->host_no, rval)); 1759 "GFPN_ID issue IOCB failed (%d).\n", rval);
1754 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1760 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1755 "GFPN_ID") != QLA_SUCCESS) { 1761 "GFPN_ID") != QLA_SUCCESS) {
1756 rval = QLA_FUNCTION_FAILED; 1762 rval = QLA_FUNCTION_FAILED;
@@ -1860,8 +1866,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1860 sizeof(ms_iocb_entry_t)); 1866 sizeof(ms_iocb_entry_t));
1861 if (rval != QLA_SUCCESS) { 1867 if (rval != QLA_SUCCESS) {
1862 /*EMPTY*/ 1868 /*EMPTY*/
1863 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB " 1869 ql_dbg(ql_dbg_disc, vha, 0x2059,
1864 "failed (%d).\n", vha->host_no, rval)); 1870 "GPSC issue IOCB failed (%d).\n", rval);
1865 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1871 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1866 "GPSC")) != QLA_SUCCESS) { 1872 "GPSC")) != QLA_SUCCESS) {
1867 /* FM command unsupported? */ 1873 /* FM command unsupported? */
@@ -1870,9 +1876,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1870 CT_REASON_INVALID_COMMAND_CODE || 1876 CT_REASON_INVALID_COMMAND_CODE ||
1871 ct_rsp->header.reason_code == 1877 ct_rsp->header.reason_code ==
1872 CT_REASON_COMMAND_UNSUPPORTED)) { 1878 CT_REASON_COMMAND_UNSUPPORTED)) {
1873 DEBUG2(printk("scsi(%ld): GPSC command " 1879 ql_dbg(ql_dbg_disc, vha, 0x205a,
1874 "unsupported, disabling query...\n", 1880 "GPSC command unsupported, disabling "
1875 vha->host_no)); 1881 "query.\n");
1876 ha->flags.gpsc_supported = 0; 1882 ha->flags.gpsc_supported = 0;
1877 rval = QLA_FUNCTION_FAILED; 1883 rval = QLA_FUNCTION_FAILED;
1878 break; 1884 break;
@@ -1898,9 +1904,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1898 break; 1904 break;
1899 } 1905 }
1900 1906
1901 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - " 1907 ql_dbg(ql_dbg_disc, vha, 0x205b,
1902 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " 1908 "GPSC ext entry - fpn "
1903 "speed=%04x.\n", vha->host_no, 1909 "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
1910 "speed=%04x.\n",
1904 list[i].fabric_port_name[0], 1911 list[i].fabric_port_name[0],
1905 list[i].fabric_port_name[1], 1912 list[i].fabric_port_name[1],
1906 list[i].fabric_port_name[2], 1913 list[i].fabric_port_name[2],
@@ -1910,7 +1917,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1910 list[i].fabric_port_name[6], 1917 list[i].fabric_port_name[6],
1911 list[i].fabric_port_name[7], 1918 list[i].fabric_port_name[7],
1912 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 1919 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
1913 be16_to_cpu(ct_rsp->rsp.gpsc.speed))); 1920 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
1914 } 1921 }
1915 1922
1916 /* Last device exit. */ 1923 /* Last device exit. */
@@ -1968,14 +1975,12 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
1968 sizeof(ms_iocb_entry_t)); 1975 sizeof(ms_iocb_entry_t));
1969 1976
1970 if (rval != QLA_SUCCESS) { 1977 if (rval != QLA_SUCCESS) {
1971 DEBUG2_3(printk(KERN_INFO 1978 ql_dbg(ql_dbg_disc, vha, 0x205c,
1972 "scsi(%ld): GFF_ID issue IOCB failed " 1979 "GFF_ID issue IOCB failed (%d).\n", rval);
1973 "(%d).\n", vha->host_no, rval));
1974 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1980 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1975 "GFF_ID") != QLA_SUCCESS) { 1981 "GFF_ID") != QLA_SUCCESS) {
1976 DEBUG2_3(printk(KERN_INFO 1982 ql_dbg(ql_dbg_disc, vha, 0x205d,
1977 "scsi(%ld): GFF_ID IOCB status had a " 1983 "GFF_ID IOCB status had a failure status code.\n");
1978 "failure status code\n", vha->host_no));
1979 } else { 1984 } else {
1980 fcp_scsi_features = 1985 fcp_scsi_features =
1981 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 1986 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 920b76bfbb93..def694271bf7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -153,11 +153,10 @@ qla2x00_async_iocb_timeout(srb_t *sp)
153 fc_port_t *fcport = sp->fcport; 153 fc_port_t *fcport = sp->fcport;
154 struct srb_ctx *ctx = sp->ctx; 154 struct srb_ctx *ctx = sp->ctx;
155 155
156 DEBUG2(printk(KERN_WARNING 156 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
157 "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n", 157 "Async-%s timeout - portid=%02x%02x%02x.\n",
158 fcport->vha->host_no, sp->handle, 158 ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
159 ctx->name, fcport->d_id.b.domain, 159 fcport->d_id.b.al_pa);
160 fcport->d_id.b.area, fcport->d_id.b.al_pa));
161 160
162 fcport->flags &= ~FCF_ASYNC_SENT; 161 fcport->flags &= ~FCF_ASYNC_SENT;
163 if (ctx->type == SRB_LOGIN_CMD) { 162 if (ctx->type == SRB_LOGIN_CMD) {
@@ -211,11 +210,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
211 if (rval != QLA_SUCCESS) 210 if (rval != QLA_SUCCESS)
212 goto done_free_sp; 211 goto done_free_sp;
213 212
214 DEBUG2(printk(KERN_DEBUG 213 ql_dbg(ql_dbg_disc, vha, 0x2072,
215 "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x " 214 "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
216 "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id, 215 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
217 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 216 fcport->d_id.b.al_pa, fcport->login_retry);
218 fcport->login_retry));
219 return rval; 217 return rval;
220 218
221done_free_sp: 219done_free_sp:
@@ -259,10 +257,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
259 if (rval != QLA_SUCCESS) 257 if (rval != QLA_SUCCESS)
260 goto done_free_sp; 258 goto done_free_sp;
261 259
262 DEBUG2(printk(KERN_DEBUG 260 ql_dbg(ql_dbg_disc, vha, 0x2070,
263 "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n", 261 "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
264 fcport->vha->host_no, sp->handle, fcport->loop_id, 262 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
265 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 263 fcport->d_id.b.al_pa);
266 return rval; 264 return rval;
267 265
268done_free_sp: 266done_free_sp:
@@ -309,11 +307,10 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
309 if (rval != QLA_SUCCESS) 307 if (rval != QLA_SUCCESS)
310 goto done_free_sp; 308 goto done_free_sp;
311 309
312 DEBUG2(printk(KERN_DEBUG 310 ql_dbg(ql_dbg_disc, vha, 0x206f,
313 "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n", 311 "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
314 fcport->vha->host_no, sp->handle, fcport->loop_id, 312 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
315 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 313 fcport->d_id.b.al_pa);
316
317 return rval; 314 return rval;
318 315
319done_free_sp: 316done_free_sp:
@@ -362,11 +359,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
362 if (rval != QLA_SUCCESS) 359 if (rval != QLA_SUCCESS)
363 goto done_free_sp; 360 goto done_free_sp;
364 361
365 DEBUG2(printk(KERN_DEBUG 362 ql_dbg(ql_dbg_taskm, vha, 0x802f,
366 "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n", 363 "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
367 fcport->vha->host_no, sp->handle, fcport->loop_id, 364 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
368 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 365 fcport->d_id.b.al_pa);
369
370 return rval; 366 return rval;
371 367
372done_free_sp: 368done_free_sp:
@@ -471,9 +467,8 @@ qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
471 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 467 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
472 468
473 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { 469 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
474 DEBUG2_3_11(printk(KERN_WARNING 470 ql_dbg(ql_dbg_taskm, vha, 0x8030,
475 "%s(%ld): TM IOCB failed (%x).\n", 471 "TM IOCB failed (%x).\n", rval);
476 __func__, vha->host_no, rval));
477 } 472 }
478 473
479 return; 474 return;
@@ -519,11 +514,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
519 set_bit(0, ha->req_qid_map); 514 set_bit(0, ha->req_qid_map);
520 set_bit(0, ha->rsp_qid_map); 515 set_bit(0, ha->rsp_qid_map);
521 516
522 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 517 ql_log(ql_log_info, vha, 0x0040,
518 "Configuring PCI space...\n");
523 rval = ha->isp_ops->pci_config(vha); 519 rval = ha->isp_ops->pci_config(vha);
524 if (rval) { 520 if (rval) {
525 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", 521 ql_log(ql_log_warn, vha, 0x0044,
526 vha->host_no)); 522 "Unable to configure PCI space.\n");
527 return (rval); 523 return (rval);
528 } 524 }
529 525
@@ -531,20 +527,21 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
531 527
532 rval = qla2xxx_get_flash_info(vha); 528 rval = qla2xxx_get_flash_info(vha);
533 if (rval) { 529 if (rval) {
534 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n", 530 ql_log(ql_log_fatal, vha, 0x004f,
535 vha->host_no)); 531 "Unable to validate FLASH data.\n");
536 return (rval); 532 return (rval);
537 } 533 }
538 534
539 ha->isp_ops->get_flash_version(vha, req->ring); 535 ha->isp_ops->get_flash_version(vha, req->ring);
540 536 ql_log(ql_log_info, vha, 0x0061,
541 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 537 "Configure NVRAM parameters...\n");
542 538
543 ha->isp_ops->nvram_config(vha); 539 ha->isp_ops->nvram_config(vha);
544 540
545 if (ha->flags.disable_serdes) { 541 if (ha->flags.disable_serdes) {
546 /* Mask HBA via NVRAM settings? */ 542 /* Mask HBA via NVRAM settings? */
547 qla_printk(KERN_INFO, ha, "Masking HBA WWPN " 543 ql_log(ql_log_info, vha, 0x0077,
544 "Masking HBA WWPN "
548 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", 545 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
549 vha->port_name[0], vha->port_name[1], 546 vha->port_name[0], vha->port_name[1],
550 vha->port_name[2], vha->port_name[3], 547 vha->port_name[2], vha->port_name[3],
@@ -553,7 +550,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
553 return QLA_FUNCTION_FAILED; 550 return QLA_FUNCTION_FAILED;
554 } 551 }
555 552
556 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 553 ql_log(ql_log_info, vha, 0x0078,
554 "Verifying loaded RISC code...\n");
557 555
558 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { 556 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
559 rval = ha->isp_ops->chip_diag(vha); 557 rval = ha->isp_ops->chip_diag(vha);
@@ -567,7 +565,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
567 if (IS_QLA84XX(ha)) { 565 if (IS_QLA84XX(ha)) {
568 ha->cs84xx = qla84xx_get_chip(vha); 566 ha->cs84xx = qla84xx_get_chip(vha);
569 if (!ha->cs84xx) { 567 if (!ha->cs84xx) {
570 qla_printk(KERN_ERR, ha, 568 ql_log(ql_log_warn, vha, 0x00d0,
571 "Unable to configure ISP84XX.\n"); 569 "Unable to configure ISP84XX.\n");
572 return QLA_FUNCTION_FAILED; 570 return QLA_FUNCTION_FAILED;
573 } 571 }
@@ -579,8 +577,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
579 /* Issue verify 84xx FW IOCB to complete 84xx initialization */ 577 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
580 rval = qla84xx_init_chip(vha); 578 rval = qla84xx_init_chip(vha);
581 if (rval != QLA_SUCCESS) { 579 if (rval != QLA_SUCCESS) {
582 qla_printk(KERN_ERR, ha, 580 ql_log(ql_log_warn, vha, 0x00d4,
583 "Unable to initialize ISP84XX.\n"); 581 "Unable to initialize ISP84XX.\n");
584 qla84xx_put_chip(vha); 582 qla84xx_put_chip(vha);
585 } 583 }
586 } 584 }
@@ -797,9 +795,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
797 rval = QLA_FUNCTION_FAILED; 795 rval = QLA_FUNCTION_FAILED;
798 796
799 if (ha->flags.disable_risc_code_load) { 797 if (ha->flags.disable_risc_code_load) {
800 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n", 798 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
801 vha->host_no));
802 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
803 799
804 /* Verify checksum of loaded RISC code. */ 800 /* Verify checksum of loaded RISC code. */
805 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); 801 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
@@ -810,10 +806,9 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
810 } 806 }
811 } 807 }
812 808
813 if (rval) { 809 if (rval)
814 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n", 810 ql_dbg(ql_dbg_init, vha, 0x007a,
815 vha->host_no)); 811 "**** Load RISC code ****.\n");
816 }
817 812
818 return (rval); 813 return (rval);
819} 814}
@@ -1105,8 +1100,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1105 /* Assume a failed state */ 1100 /* Assume a failed state */
1106 rval = QLA_FUNCTION_FAILED; 1101 rval = QLA_FUNCTION_FAILED;
1107 1102
1108 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", 1103 ql_dbg(ql_dbg_init, vha, 0x007b,
1109 vha->host_no, (u_long)&reg->flash_address)); 1104 "Testing device at %lx.\n", (u_long)&reg->flash_address);
1110 1105
1111 spin_lock_irqsave(&ha->hardware_lock, flags); 1106 spin_lock_irqsave(&ha->hardware_lock, flags);
1112 1107
@@ -1128,8 +1123,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1128 if (!cnt) 1123 if (!cnt)
1129 goto chip_diag_failed; 1124 goto chip_diag_failed;
1130 1125
1131 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 1126 ql_dbg(ql_dbg_init, vha, 0x007c,
1132 vha->host_no)); 1127 "Reset register cleared by chip reset.\n");
1133 1128
1134 /* Reset RISC processor. */ 1129 /* Reset RISC processor. */
1135 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 1130 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -1150,7 +1145,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1150 goto chip_diag_failed; 1145 goto chip_diag_failed;
1151 1146
1152 /* Check product ID of chip */ 1147 /* Check product ID of chip */
1153 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no)); 1148 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
1154 1149
1155 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 1150 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1156 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 1151 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -1158,8 +1153,9 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1158 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); 1153 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1159 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || 1154 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1160 mb[3] != PROD_ID_3) { 1155 mb[3] != PROD_ID_3) {
1161 qla_printk(KERN_WARNING, ha, 1156 ql_log(ql_log_warn, vha, 0x0062,
1162 "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]); 1157 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
1158 mb[1], mb[2], mb[3]);
1163 1159
1164 goto chip_diag_failed; 1160 goto chip_diag_failed;
1165 } 1161 }
@@ -1178,8 +1174,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1178 if (IS_QLA2200(ha) && 1174 if (IS_QLA2200(ha) &&
1179 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 1175 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1180 /* Limit firmware transfer size with a 2200A */ 1176 /* Limit firmware transfer size with a 2200A */
1181 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", 1177 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1182 vha->host_no));
1183 1178
1184 ha->device_type |= DT_ISP2200A; 1179 ha->device_type |= DT_ISP2200A;
1185 ha->fw_transfer_size = 128; 1180 ha->fw_transfer_size = 128;
@@ -1188,24 +1183,20 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1188 /* Wrap Incoming Mailboxes Test. */ 1183 /* Wrap Incoming Mailboxes Test. */
1189 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1184 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1190 1185
1191 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no)); 1186 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
1192 rval = qla2x00_mbx_reg_test(vha); 1187 rval = qla2x00_mbx_reg_test(vha);
1193 if (rval) { 1188 if (rval)
1194 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 1189 ql_log(ql_log_warn, vha, 0x0080,
1195 vha->host_no)); 1190 "Failed mailbox send register test.\n");
1196 qla_printk(KERN_WARNING, ha, 1191 else
1197 "Failed mailbox send register test\n");
1198 }
1199 else {
1200 /* Flag a successful rval */ 1192 /* Flag a successful rval */
1201 rval = QLA_SUCCESS; 1193 rval = QLA_SUCCESS;
1202 }
1203 spin_lock_irqsave(&ha->hardware_lock, flags); 1194 spin_lock_irqsave(&ha->hardware_lock, flags);
1204 1195
1205chip_diag_failed: 1196chip_diag_failed:
1206 if (rval) 1197 if (rval)
1207 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " 1198 ql_log(ql_log_info, vha, 0x0081,
1208 "****\n", vha->host_no)); 1199 "Chip diagnostics **** FAILED ****.\n");
1209 1200
1210 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1201 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1211 1202
@@ -1232,10 +1223,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
1232 1223
1233 rval = qla2x00_mbx_reg_test(vha); 1224 rval = qla2x00_mbx_reg_test(vha);
1234 if (rval) { 1225 if (rval) {
1235 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 1226 ql_log(ql_log_warn, vha, 0x0082,
1236 vha->host_no)); 1227 "Failed mailbox send register test.\n");
1237 qla_printk(KERN_WARNING, ha,
1238 "Failed mailbox send register test\n");
1239 } else { 1228 } else {
1240 /* Flag a successful rval */ 1229 /* Flag a successful rval */
1241 rval = QLA_SUCCESS; 1230 rval = QLA_SUCCESS;
@@ -1257,8 +1246,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1257 struct rsp_que *rsp = ha->rsp_q_map[0]; 1246 struct rsp_que *rsp = ha->rsp_q_map[0];
1258 1247
1259 if (ha->fw_dump) { 1248 if (ha->fw_dump) {
1260 qla_printk(KERN_WARNING, ha, 1249 ql_dbg(ql_dbg_init, vha, 0x00bd,
1261 "Firmware dump previously allocated.\n"); 1250 "Firmware dump already allocated.\n");
1262 return; 1251 return;
1263 } 1252 }
1264 1253
@@ -1288,8 +1277,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1288 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 1277 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1289 GFP_KERNEL); 1278 GFP_KERNEL);
1290 if (!tc) { 1279 if (!tc) {
1291 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1280 ql_log(ql_log_warn, vha, 0x00be,
1292 "(%d KB) for FCE.\n", FCE_SIZE / 1024); 1281 "Unable to allocate (%d KB) for FCE.\n",
1282 FCE_SIZE / 1024);
1293 goto try_eft; 1283 goto try_eft;
1294 } 1284 }
1295 1285
@@ -1297,16 +1287,15 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1297 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, 1287 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1298 ha->fce_mb, &ha->fce_bufs); 1288 ha->fce_mb, &ha->fce_bufs);
1299 if (rval) { 1289 if (rval) {
1300 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1290 ql_log(ql_log_warn, vha, 0x00bf,
1301 "FCE (%d).\n", rval); 1291 "Unable to initialize FCE (%d).\n", rval);
1302 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 1292 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1303 tc_dma); 1293 tc_dma);
1304 ha->flags.fce_enabled = 0; 1294 ha->flags.fce_enabled = 0;
1305 goto try_eft; 1295 goto try_eft;
1306 } 1296 }
1307 1297 ql_log(ql_log_info, vha, 0x00c0,
1308 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", 1298 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
1309 FCE_SIZE / 1024);
1310 1299
1311 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; 1300 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1312 ha->flags.fce_enabled = 1; 1301 ha->flags.fce_enabled = 1;
@@ -1317,23 +1306,23 @@ try_eft:
1317 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 1306 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1318 GFP_KERNEL); 1307 GFP_KERNEL);
1319 if (!tc) { 1308 if (!tc) {
1320 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1309 ql_log(ql_log_warn, vha, 0x00c1,
1321 "(%d KB) for EFT.\n", EFT_SIZE / 1024); 1310 "Unable to allocate (%d KB) for EFT.\n",
1311 EFT_SIZE / 1024);
1322 goto cont_alloc; 1312 goto cont_alloc;
1323 } 1313 }
1324 1314
1325 memset(tc, 0, EFT_SIZE); 1315 memset(tc, 0, EFT_SIZE);
1326 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); 1316 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
1327 if (rval) { 1317 if (rval) {
1328 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1318 ql_log(ql_log_warn, vha, 0x00c2,
1329 "EFT (%d).\n", rval); 1319 "Unable to initialize EFT (%d).\n", rval);
1330 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, 1320 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1331 tc_dma); 1321 tc_dma);
1332 goto cont_alloc; 1322 goto cont_alloc;
1333 } 1323 }
1334 1324 ql_log(ql_log_info, vha, 0x00c3,
1335 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n", 1325 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
1336 EFT_SIZE / 1024);
1337 1326
1338 eft_size = EFT_SIZE; 1327 eft_size = EFT_SIZE;
1339 ha->eft_dma = tc_dma; 1328 ha->eft_dma = tc_dma;
@@ -1350,8 +1339,9 @@ cont_alloc:
1350 1339
1351 ha->fw_dump = vmalloc(dump_size); 1340 ha->fw_dump = vmalloc(dump_size);
1352 if (!ha->fw_dump) { 1341 if (!ha->fw_dump) {
1353 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " 1342 ql_log(ql_log_warn, vha, 0x00c4,
1354 "firmware dump!!!\n", dump_size / 1024); 1343 "Unable to allocate (%d KB) for firmware dump.\n",
1344 dump_size / 1024);
1355 1345
1356 if (ha->fce) { 1346 if (ha->fce) {
1357 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 1347 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@@ -1368,8 +1358,8 @@ cont_alloc:
1368 } 1358 }
1369 return; 1359 return;
1370 } 1360 }
1371 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", 1361 ql_log(ql_log_info, vha, 0x00c5,
1372 dump_size / 1024); 1362 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
1373 1363
1374 ha->fw_dump_len = dump_size; 1364 ha->fw_dump_len = dump_size;
1375 ha->fw_dump->signature[0] = 'Q'; 1365 ha->fw_dump->signature[0] = 'Q';
@@ -1398,23 +1388,21 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
1398 int rval; 1388 int rval;
1399 uint16_t dc; 1389 uint16_t dc;
1400 uint32_t dw; 1390 uint32_t dw;
1401 struct qla_hw_data *ha = vha->hw;
1402 1391
1403 if (!IS_QLA81XX(vha->hw)) 1392 if (!IS_QLA81XX(vha->hw))
1404 return QLA_SUCCESS; 1393 return QLA_SUCCESS;
1405 1394
1406 rval = qla2x00_write_ram_word(vha, 0x7c00, 1); 1395 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1407 if (rval != QLA_SUCCESS) { 1396 if (rval != QLA_SUCCESS) {
1408 DEBUG2(qla_printk(KERN_WARNING, ha, 1397 ql_log(ql_log_warn, vha, 0x0105,
1409 "Sync-MPI: Unable to acquire semaphore.\n")); 1398 "Unable to acquire semaphore.\n");
1410 goto done; 1399 goto done;
1411 } 1400 }
1412 1401
1413 pci_read_config_word(vha->hw->pdev, 0x54, &dc); 1402 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1414 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); 1403 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1415 if (rval != QLA_SUCCESS) { 1404 if (rval != QLA_SUCCESS) {
1416 DEBUG2(qla_printk(KERN_WARNING, ha, 1405 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
1417 "Sync-MPI: Unable to read sync.\n"));
1418 goto done_release; 1406 goto done_release;
1419 } 1407 }
1420 1408
@@ -1426,15 +1414,14 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
1426 dw |= dc; 1414 dw |= dc;
1427 rval = qla2x00_write_ram_word(vha, 0x7a15, dw); 1415 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1428 if (rval != QLA_SUCCESS) { 1416 if (rval != QLA_SUCCESS) {
1429 DEBUG2(qla_printk(KERN_WARNING, ha, 1417 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
1430 "Sync-MPI: Unable to gain sync.\n"));
1431 } 1418 }
1432 1419
1433done_release: 1420done_release:
1434 rval = qla2x00_write_ram_word(vha, 0x7c00, 0); 1421 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1435 if (rval != QLA_SUCCESS) { 1422 if (rval != QLA_SUCCESS) {
1436 DEBUG2(qla_printk(KERN_WARNING, ha, 1423 ql_log(ql_log_warn, vha, 0x006d,
1437 "Sync-MPI: Unable to release semaphore.\n")); 1424 "Unable to release semaphore.\n");
1438 } 1425 }
1439 1426
1440done: 1427done:
@@ -1479,14 +1466,14 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1479 /* Load firmware sequences */ 1466 /* Load firmware sequences */
1480 rval = ha->isp_ops->load_risc(vha, &srisc_address); 1467 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1481 if (rval == QLA_SUCCESS) { 1468 if (rval == QLA_SUCCESS) {
1482 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 1469 ql_dbg(ql_dbg_init, vha, 0x00c9,
1483 "code.\n", vha->host_no)); 1470 "Verifying Checksum of loaded RISC code.\n");
1484 1471
1485 rval = qla2x00_verify_checksum(vha, srisc_address); 1472 rval = qla2x00_verify_checksum(vha, srisc_address);
1486 if (rval == QLA_SUCCESS) { 1473 if (rval == QLA_SUCCESS) {
1487 /* Start firmware execution. */ 1474 /* Start firmware execution. */
1488 DEBUG(printk("scsi(%ld): Checksum OK, start " 1475 ql_dbg(ql_dbg_init, vha, 0x00ca,
1489 "firmware.\n", vha->host_no)); 1476 "Starting firmware.\n");
1490 1477
1491 rval = qla2x00_execute_fw(vha, srisc_address); 1478 rval = qla2x00_execute_fw(vha, srisc_address);
1492 /* Retrieve firmware information. */ 1479 /* Retrieve firmware information. */
@@ -1522,9 +1509,9 @@ enable_82xx_npiv:
1522 } 1509 }
1523 } 1510 }
1524 } else { 1511 } else {
1525 DEBUG2(printk(KERN_INFO 1512 ql_log(ql_log_fatal, vha, 0x00cd,
1526 "scsi(%ld): ISP Firmware failed checksum.\n", 1513 "ISP Firmware failed checksum.\n");
1527 vha->host_no)); 1514 goto failed;
1528 } 1515 }
1529 } 1516 }
1530 1517
@@ -1549,7 +1536,7 @@ enable_82xx_npiv:
1549 ha->flags.fac_supported = 1; 1536 ha->flags.fac_supported = 1;
1550 ha->fdt_block_size = size << 2; 1537 ha->fdt_block_size = size << 2;
1551 } else { 1538 } else {
1552 qla_printk(KERN_ERR, ha, 1539 ql_log(ql_log_warn, vha, 0x00ce,
1553 "Unsupported FAC firmware (%d.%02d.%02d).\n", 1540 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1554 ha->fw_major_version, ha->fw_minor_version, 1541 ha->fw_major_version, ha->fw_minor_version,
1555 ha->fw_subminor_version); 1542 ha->fw_subminor_version);
@@ -1557,8 +1544,8 @@ enable_82xx_npiv:
1557 } 1544 }
1558failed: 1545failed:
1559 if (rval) { 1546 if (rval) {
1560 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1547 ql_log(ql_log_fatal, vha, 0x00cf,
1561 vha->host_no)); 1548 "Setup chip ****FAILED****.\n");
1562 } 1549 }
1563 1550
1564 return (rval); 1551 return (rval);
@@ -1608,10 +1595,11 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha)
1608 return; 1595 return;
1609 1596
1610 /* Serial Link options. */ 1597 /* Serial Link options. */
1611 DEBUG3(printk("scsi(%ld): Serial link options:\n", 1598 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
1612 vha->host_no)); 1599 "Serial link options.\n");
1613 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, 1600 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
1614 sizeof(ha->fw_seriallink_options))); 1601 (uint8_t *)&ha->fw_seriallink_options,
1602 sizeof(ha->fw_seriallink_options));
1615 1603
1616 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1604 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1617 if (ha->fw_seriallink_options[3] & BIT_2) { 1605 if (ha->fw_seriallink_options[3] & BIT_2) {
@@ -1688,7 +1676,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
1688 le16_to_cpu(ha->fw_seriallink_options24[2]), 1676 le16_to_cpu(ha->fw_seriallink_options24[2]),
1689 le16_to_cpu(ha->fw_seriallink_options24[3])); 1677 le16_to_cpu(ha->fw_seriallink_options24[3]));
1690 if (rval != QLA_SUCCESS) { 1678 if (rval != QLA_SUCCESS) {
1691 qla_printk(KERN_WARNING, ha, 1679 ql_log(ql_log_warn, vha, 0x0104,
1692 "Unable to update Serial Link options (%x).\n", rval); 1680 "Unable to update Serial Link options (%x).\n", rval);
1693 } 1681 }
1694} 1682}
@@ -1746,8 +1734,9 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1746 icb->rid = __constant_cpu_to_le16(rid); 1734 icb->rid = __constant_cpu_to_le16(rid);
1747 if (ha->flags.msix_enabled) { 1735 if (ha->flags.msix_enabled) {
1748 msix = &ha->msix_entries[1]; 1736 msix = &ha->msix_entries[1];
1749 DEBUG2_17(printk(KERN_INFO 1737 ql_dbg(ql_dbg_init, vha, 0x00fd,
1750 "Registering vector 0x%x for base que\n", msix->entry)); 1738 "Registering vector 0x%x for base que.\n",
1739 msix->entry);
1751 icb->msix = cpu_to_le16(msix->entry); 1740 icb->msix = cpu_to_le16(msix->entry);
1752 } 1741 }
1753 /* Use alternate PCI bus number */ 1742 /* Use alternate PCI bus number */
@@ -1764,8 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1764 icb->firmware_options_2 &= 1753 icb->firmware_options_2 &=
1765 __constant_cpu_to_le32(~BIT_22); 1754 __constant_cpu_to_le32(~BIT_22);
1766 ha->flags.disable_msix_handshake = 1; 1755 ha->flags.disable_msix_handshake = 1;
1767 qla_printk(KERN_INFO, ha, 1756 ql_dbg(ql_dbg_init, vha, 0x00fe,
1768 "MSIX Handshake Disable Mode turned on\n"); 1757 "MSIX Handshake Disable Mode turned on.\n");
1769 } else { 1758 } else {
1770 icb->firmware_options_2 |= 1759 icb->firmware_options_2 |=
1771 __constant_cpu_to_le32(BIT_22); 1760 __constant_cpu_to_le32(BIT_22);
@@ -1850,7 +1839,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1850 /* Update any ISP specific firmware options before initialization. */ 1839 /* Update any ISP specific firmware options before initialization. */
1851 ha->isp_ops->update_fw_options(vha); 1840 ha->isp_ops->update_fw_options(vha);
1852 1841
1853 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no)); 1842 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
1854 1843
1855 if (ha->flags.npiv_supported) { 1844 if (ha->flags.npiv_supported) {
1856 if (ha->operating_mode == LOOP) 1845 if (ha->operating_mode == LOOP)
@@ -1866,11 +1855,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1866 1855
1867 rval = qla2x00_init_firmware(vha, ha->init_cb_size); 1856 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1868 if (rval) { 1857 if (rval) {
1869 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1858 ql_log(ql_log_fatal, vha, 0x00d2,
1870 vha->host_no)); 1859 "Init Firmware **** FAILED ****.\n");
1871 } else { 1860 } else {
1872 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", 1861 ql_dbg(ql_dbg_init, vha, 0x00d3,
1873 vha->host_no)); 1862 "Init Firmware -- success.\n");
1874 } 1863 }
1875 1864
1876 return (rval); 1865 return (rval);
@@ -1913,10 +1902,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1913 1902
1914 /* Wait for ISP to finish LIP */ 1903 /* Wait for ISP to finish LIP */
1915 if (!vha->flags.init_done) 1904 if (!vha->flags.init_done)
1916 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); 1905 ql_log(ql_log_info, vha, 0x801e,
1917 1906 "Waiting for LIP to complete.\n");
1918 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1919 vha->host_no));
1920 1907
1921 do { 1908 do {
1922 rval = qla2x00_get_firmware_state(vha, state); 1909 rval = qla2x00_get_firmware_state(vha, state);
@@ -1925,30 +1912,35 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1925 vha->device_flags &= ~DFLG_NO_CABLE; 1912 vha->device_flags &= ~DFLG_NO_CABLE;
1926 } 1913 }
1927 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1914 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1928 DEBUG16(printk("scsi(%ld): fw_state=%x " 1915 ql_dbg(ql_dbg_taskm, vha, 0x801f,
1929 "84xx=%x.\n", vha->host_no, state[0], 1916 "fw_state=%x 84xx=%x.\n", state[0],
1930 state[2])); 1917 state[2]);
1931 if ((state[2] & FSTATE_LOGGED_IN) && 1918 if ((state[2] & FSTATE_LOGGED_IN) &&
1932 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1919 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1933 DEBUG16(printk("scsi(%ld): Sending " 1920 ql_dbg(ql_dbg_taskm, vha, 0x8028,
1934 "verify iocb.\n", vha->host_no)); 1921 "Sending verify iocb.\n");
1935 1922
1936 cs84xx_time = jiffies; 1923 cs84xx_time = jiffies;
1937 rval = qla84xx_init_chip(vha); 1924 rval = qla84xx_init_chip(vha);
1938 if (rval != QLA_SUCCESS) 1925 if (rval != QLA_SUCCESS) {
1926 ql_log(ql_log_warn,
1927 vha, 0x8043,
1928 "Init chip failed.\n");
1939 break; 1929 break;
1930 }
1940 1931
1941 /* Add time taken to initialize. */ 1932 /* Add time taken to initialize. */
1942 cs84xx_time = jiffies - cs84xx_time; 1933 cs84xx_time = jiffies - cs84xx_time;
1943 wtime += cs84xx_time; 1934 wtime += cs84xx_time;
1944 mtime += cs84xx_time; 1935 mtime += cs84xx_time;
1945 DEBUG16(printk("scsi(%ld): Increasing " 1936 ql_dbg(ql_dbg_taskm, vha, 0x8042,
1946 "wait time by %ld. New time %ld\n", 1937 "Increasing wait time by %ld. "
1947 vha->host_no, cs84xx_time, wtime)); 1938 "New time %ld.\n", cs84xx_time,
1939 wtime);
1948 } 1940 }
1949 } else if (state[0] == FSTATE_READY) { 1941 } else if (state[0] == FSTATE_READY) {
1950 DEBUG(printk("scsi(%ld): F/W Ready - OK \n", 1942 ql_dbg(ql_dbg_taskm, vha, 0x8037,
1951 vha->host_no)); 1943 "F/W Ready - OK.\n");
1952 1944
1953 qla2x00_get_retry_cnt(vha, &ha->retry_count, 1945 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1954 &ha->login_timeout, &ha->r_a_tov); 1946 &ha->login_timeout, &ha->r_a_tov);
@@ -1965,7 +1957,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1965 * other than Wait for Login. 1957 * other than Wait for Login.
1966 */ 1958 */
1967 if (time_after_eq(jiffies, mtime)) { 1959 if (time_after_eq(jiffies, mtime)) {
1968 qla_printk(KERN_INFO, ha, 1960 ql_log(ql_log_info, vha, 0x8038,
1969 "Cable is unplugged...\n"); 1961 "Cable is unplugged...\n");
1970 1962
1971 vha->device_flags |= DFLG_NO_CABLE; 1963 vha->device_flags |= DFLG_NO_CABLE;
@@ -1985,17 +1977,17 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1985 /* Delay for a while */ 1977 /* Delay for a while */
1986 msleep(500); 1978 msleep(500);
1987 1979
1988 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1980 ql_dbg(ql_dbg_taskm, vha, 0x8039,
1989 vha->host_no, state[0], jiffies)); 1981 "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1990 } while (1); 1982 } while (1);
1991 1983
1992 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", 1984 ql_dbg(ql_dbg_taskm, vha, 0x803a,
1993 vha->host_no, state[0], state[1], state[2], state[3], state[4], 1985 "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
1994 jiffies)); 1986 state[1], state[2], state[3], state[4], jiffies);
1995 1987
1996 if (rval) { 1988 if (rval) {
1997 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1989 ql_log(ql_log_warn, vha, 0x803b,
1998 vha->host_no)); 1990 "Firmware ready **** FAILED ****.\n");
1999 } 1991 }
2000 1992
2001 return (rval); 1993 return (rval);
@@ -2034,19 +2026,19 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2034 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || 2026 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2035 IS_QLA8XXX_TYPE(ha) || 2027 IS_QLA8XXX_TYPE(ha) ||
2036 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 2028 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2037 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 2029 ql_dbg(ql_dbg_disc, vha, 0x2008,
2038 __func__, vha->host_no)); 2030 "Loop is in a transition state.\n");
2039 } else { 2031 } else {
2040 qla_printk(KERN_WARNING, ha, 2032 ql_log(ql_log_warn, vha, 0x2009,
2041 "ERROR -- Unable to get host loop ID.\n"); 2033 "Unable to get host loop ID.\n");
2042 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2034 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2043 } 2035 }
2044 return (rval); 2036 return (rval);
2045 } 2037 }
2046 2038
2047 if (topo == 4) { 2039 if (topo == 4) {
2048 qla_printk(KERN_INFO, ha, 2040 ql_log(ql_log_info, vha, 0x200a,
2049 "Cannot get topology - retrying.\n"); 2041 "Cannot get topology - retrying.\n");
2050 return (QLA_FUNCTION_FAILED); 2042 return (QLA_FUNCTION_FAILED);
2051 } 2043 }
2052 2044
@@ -2059,31 +2051,27 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2059 2051
2060 switch (topo) { 2052 switch (topo) {
2061 case 0: 2053 case 0:
2062 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", 2054 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
2063 vha->host_no));
2064 ha->current_topology = ISP_CFG_NL; 2055 ha->current_topology = ISP_CFG_NL;
2065 strcpy(connect_type, "(Loop)"); 2056 strcpy(connect_type, "(Loop)");
2066 break; 2057 break;
2067 2058
2068 case 1: 2059 case 1:
2069 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 2060 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2070 vha->host_no));
2071 ha->switch_cap = sw_cap; 2061 ha->switch_cap = sw_cap;
2072 ha->current_topology = ISP_CFG_FL; 2062 ha->current_topology = ISP_CFG_FL;
2073 strcpy(connect_type, "(FL_Port)"); 2063 strcpy(connect_type, "(FL_Port)");
2074 break; 2064 break;
2075 2065
2076 case 2: 2066 case 2:
2077 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", 2067 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
2078 vha->host_no));
2079 ha->operating_mode = P2P; 2068 ha->operating_mode = P2P;
2080 ha->current_topology = ISP_CFG_N; 2069 ha->current_topology = ISP_CFG_N;
2081 strcpy(connect_type, "(N_Port-to-N_Port)"); 2070 strcpy(connect_type, "(N_Port-to-N_Port)");
2082 break; 2071 break;
2083 2072
2084 case 3: 2073 case 3:
2085 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 2074 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2086 vha->host_no));
2087 ha->switch_cap = sw_cap; 2075 ha->switch_cap = sw_cap;
2088 ha->operating_mode = P2P; 2076 ha->operating_mode = P2P;
2089 ha->current_topology = ISP_CFG_F; 2077 ha->current_topology = ISP_CFG_F;
@@ -2091,9 +2079,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2091 break; 2079 break;
2092 2080
2093 default: 2081 default:
2094 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " 2082 ql_dbg(ql_dbg_disc, vha, 0x200f,
2095 "Using NL.\n", 2083 "HBA in unknown topology %x, using NL.\n", topo);
2096 vha->host_no, topo));
2097 ha->current_topology = ISP_CFG_NL; 2084 ha->current_topology = ISP_CFG_NL;
2098 strcpy(connect_type, "(Loop)"); 2085 strcpy(connect_type, "(Loop)");
2099 break; 2086 break;
@@ -2106,14 +2093,16 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2106 vha->d_id.b.al_pa = al_pa; 2093 vha->d_id.b.al_pa = al_pa;
2107 2094
2108 if (!vha->flags.init_done) 2095 if (!vha->flags.init_done)
2109 qla_printk(KERN_INFO, ha, 2096 ql_log(ql_log_info, vha, 0x2010,
2110 "Topology - %s, Host Loop address 0x%x\n", 2097 "Topology - %s, Host Loop address 0x%x.\n",
2111 connect_type, vha->loop_id); 2098 connect_type, vha->loop_id);
2112 2099
2113 if (rval) { 2100 if (rval) {
2114 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no)); 2101 ql_log(ql_log_warn, vha, 0x2011,
2102 "%s FAILED\n", __func__);
2115 } else { 2103 } else {
2116 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no)); 2104 ql_dbg(ql_dbg_disc, vha, 0x2012,
2105 "%s success\n", __func__);
2117 } 2106 }
2118 2107
2119 return(rval); 2108 return(rval);
@@ -2227,18 +2216,22 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2227 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 2216 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2228 chksum += *ptr++; 2217 chksum += *ptr++;
2229 2218
2230 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 2219 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
2231 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 2220 "Contents of NVRAM.\n");
2221 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
2222 (uint8_t *)nv, ha->nvram_size);
2232 2223
2233 /* Bad NVRAM data, set defaults parameters. */ 2224 /* Bad NVRAM data, set defaults parameters. */
2234 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || 2225 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2235 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { 2226 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2236 /* Reset NVRAM data. */ 2227 /* Reset NVRAM data. */
2237 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 2228 ql_log(ql_log_warn, vha, 0x0064,
2238 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 2229 "Inconisistent NVRAM "
2239 nv->nvram_version); 2230 "detected: checksum=0x%x id=%c version=0x%x.\n",
2240 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 2231 chksum, nv->id[0], nv->nvram_version);
2241 "invalid -- WWPN) defaults.\n"); 2232 ql_log(ql_log_warn, vha, 0x0065,
2233 "Falling back to "
2234 "functioning (yet invalid -- WWPN) defaults.\n");
2242 2235
2243 /* 2236 /*
2244 * Set default initialization control block. 2237 * Set default initialization control block.
@@ -2382,8 +2375,13 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2382 /* 2375 /*
2383 * Set host adapter parameters. 2376 * Set host adapter parameters.
2384 */ 2377 */
2378
2379 /*
2380 * BIT_7 in the host-parameters section allows for modification to
2381 * internal driver logging.
2382 */
2385 if (nv->host_p[0] & BIT_7) 2383 if (nv->host_p[0] & BIT_7)
2386 ql2xextended_error_logging = 1; 2384 ql2xextended_error_logging = 0x7fffffff;
2387 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); 2385 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2388 /* Always load RISC code on non ISP2[12]00 chips. */ 2386 /* Always load RISC code on non ISP2[12]00 chips. */
2389 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) 2387 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -2488,10 +2486,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2488 if (ha->zio_mode != QLA_ZIO_DISABLED) { 2486 if (ha->zio_mode != QLA_ZIO_DISABLED) {
2489 ha->zio_mode = QLA_ZIO_MODE_6; 2487 ha->zio_mode = QLA_ZIO_MODE_6;
2490 2488
2491 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 2489 ql_log(ql_log_info, vha, 0x0068,
2492 "delay (%d us).\n", vha->host_no, ha->zio_mode,
2493 ha->zio_timer * 100));
2494 qla_printk(KERN_INFO, ha,
2495 "ZIO mode %d enabled; timer delay (%d us).\n", 2490 "ZIO mode %d enabled; timer delay (%d us).\n",
2496 ha->zio_mode, ha->zio_timer * 100); 2491 ha->zio_mode, ha->zio_timer * 100);
2497 2492
@@ -2502,8 +2497,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2502 } 2497 }
2503 2498
2504 if (rval) { 2499 if (rval) {
2505 DEBUG2_3(printk(KERN_WARNING 2500 ql_log(ql_log_warn, vha, 0x0069,
2506 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 2501 "NVRAM configuration failed.\n");
2507 } 2502 }
2508 return (rval); 2503 return (rval);
2509} 2504}
@@ -2574,15 +2569,15 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2574 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { 2569 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2575 rval = qla2x00_configure_hba(vha); 2570 rval = qla2x00_configure_hba(vha);
2576 if (rval != QLA_SUCCESS) { 2571 if (rval != QLA_SUCCESS) {
2577 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 2572 ql_dbg(ql_dbg_disc, vha, 0x2013,
2578 vha->host_no)); 2573 "Unable to configure HBA.\n");
2579 return (rval); 2574 return (rval);
2580 } 2575 }
2581 } 2576 }
2582 2577
2583 save_flags = flags = vha->dpc_flags; 2578 save_flags = flags = vha->dpc_flags;
2584 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 2579 ql_dbg(ql_dbg_disc, vha, 0x2014,
2585 vha->host_no, flags)); 2580 "Configure loop -- dpc flags = 0x%lx.\n", flags);
2586 2581
2587 /* 2582 /*
2588 * If we have both an RSCN and PORT UPDATE pending then handle them 2583 * If we have both an RSCN and PORT UPDATE pending then handle them
@@ -2619,15 +2614,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2619 } 2614 }
2620 2615
2621 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2616 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2622 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2617 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2618 ql_dbg(ql_dbg_disc, vha, 0x2015,
2619 "Loop resync needed, failing.\n");
2623 rval = QLA_FUNCTION_FAILED; 2620 rval = QLA_FUNCTION_FAILED;
2621 }
2624 else 2622 else
2625 rval = qla2x00_configure_local_loop(vha); 2623 rval = qla2x00_configure_local_loop(vha);
2626 } 2624 }
2627 2625
2628 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2626 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2629 if (LOOP_TRANSITION(vha)) 2627 if (LOOP_TRANSITION(vha)) {
2628 ql_dbg(ql_dbg_disc, vha, 0x201e,
2629 "Needs RSCN update and loop transition.\n");
2630 rval = QLA_FUNCTION_FAILED; 2630 rval = QLA_FUNCTION_FAILED;
2631 }
2631 else 2632 else
2632 rval = qla2x00_configure_fabric(vha); 2633 rval = qla2x00_configure_fabric(vha);
2633 } 2634 }
@@ -2638,16 +2639,17 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2638 rval = QLA_FUNCTION_FAILED; 2639 rval = QLA_FUNCTION_FAILED;
2639 } else { 2640 } else {
2640 atomic_set(&vha->loop_state, LOOP_READY); 2641 atomic_set(&vha->loop_state, LOOP_READY);
2641 2642 ql_dbg(ql_dbg_disc, vha, 0x2069,
2642 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no)); 2643 "LOOP READY.\n");
2643 } 2644 }
2644 } 2645 }
2645 2646
2646 if (rval) { 2647 if (rval) {
2647 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2648 ql_dbg(ql_dbg_disc, vha, 0x206a,
2648 __func__, vha->host_no)); 2649 "%s *** FAILED ***.\n", __func__);
2649 } else { 2650 } else {
2650 DEBUG3(printk("%s: exiting normally\n", __func__)); 2651 ql_dbg(ql_dbg_disc, vha, 0x206b,
2652 "%s: exiting normally.\n", __func__);
2651 } 2653 }
2652 2654
2653 /* Restore state if a resync event occurred during processing */ 2655 /* Restore state if a resync event occurred during processing */
@@ -2695,8 +2697,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2695 new_fcport = NULL; 2697 new_fcport = NULL;
2696 entries = MAX_FIBRE_DEVICES; 2698 entries = MAX_FIBRE_DEVICES;
2697 2699
2698 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no)); 2700 ql_dbg(ql_dbg_disc, vha, 0x2016,
2699 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL)); 2701 "Getting FCAL position map.\n");
2702 if (ql2xextended_error_logging & ql_dbg_disc)
2703 qla2x00_get_fcal_position_map(vha, NULL);
2700 2704
2701 /* Get list of logged in devices. */ 2705 /* Get list of logged in devices. */
2702 memset(ha->gid_list, 0, GID_LIST_SIZE); 2706 memset(ha->gid_list, 0, GID_LIST_SIZE);
@@ -2705,14 +2709,17 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2705 if (rval != QLA_SUCCESS) 2709 if (rval != QLA_SUCCESS)
2706 goto cleanup_allocation; 2710 goto cleanup_allocation;
2707 2711
2708 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2712 ql_dbg(ql_dbg_disc, vha, 0x2017,
2709 vha->host_no, entries)); 2713 "Entries in ID list (%d).\n", entries);
2710 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2714 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
2711 entries * sizeof(struct gid_list_info))); 2715 (uint8_t *)ha->gid_list,
2716 entries * sizeof(struct gid_list_info));
2712 2717
2713 /* Allocate temporary fcport for any new fcports discovered. */ 2718 /* Allocate temporary fcport for any new fcports discovered. */
2714 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2719 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2715 if (new_fcport == NULL) { 2720 if (new_fcport == NULL) {
2721 ql_log(ql_log_warn, vha, 0x2018,
2722 "Memory allocation failed for fcport.\n");
2716 rval = QLA_MEMORY_ALLOC_FAILED; 2723 rval = QLA_MEMORY_ALLOC_FAILED;
2717 goto cleanup_allocation; 2724 goto cleanup_allocation;
2718 } 2725 }
@@ -2726,9 +2733,9 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2726 fcport->port_type != FCT_BROADCAST && 2733 fcport->port_type != FCT_BROADCAST &&
2727 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2734 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2728 2735
2729 DEBUG(printk("scsi(%ld): Marking port lost, " 2736 ql_dbg(ql_dbg_disc, vha, 0x2019,
2730 "loop_id=0x%04x\n", 2737 "Marking port lost loop_id=0x%04x.\n",
2731 vha->host_no, fcport->loop_id)); 2738 fcport->loop_id);
2732 2739
2733 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2740 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2734 } 2741 }
@@ -2769,12 +2776,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2769 new_fcport->vp_idx = vha->vp_idx; 2776 new_fcport->vp_idx = vha->vp_idx;
2770 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2777 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2771 if (rval2 != QLA_SUCCESS) { 2778 if (rval2 != QLA_SUCCESS) {
2772 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2779 ql_dbg(ql_dbg_disc, vha, 0x201a,
2773 "information -- get_port_database=%x, " 2780 "Failed to retrieve fcport information "
2774 "loop_id=0x%04x\n", 2781 "-- get_port_database=%x, loop_id=0x%04x.\n",
2775 vha->host_no, rval2, new_fcport->loop_id)); 2782 rval2, new_fcport->loop_id);
2776 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2783 ql_dbg(ql_dbg_disc, vha, 0x201b,
2777 vha->host_no)); 2784 "Scheduling resync.\n");
2778 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2785 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2779 continue; 2786 continue;
2780 } 2787 }
@@ -2810,6 +2817,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2810 fcport = new_fcport; 2817 fcport = new_fcport;
2811 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2818 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2812 if (new_fcport == NULL) { 2819 if (new_fcport == NULL) {
2820 ql_log(ql_log_warn, vha, 0x201c,
2821 "Failed to allocate memory for fcport.\n");
2813 rval = QLA_MEMORY_ALLOC_FAILED; 2822 rval = QLA_MEMORY_ALLOC_FAILED;
2814 goto cleanup_allocation; 2823 goto cleanup_allocation;
2815 } 2824 }
@@ -2828,8 +2837,8 @@ cleanup_allocation:
2828 kfree(new_fcport); 2837 kfree(new_fcport);
2829 2838
2830 if (rval != QLA_SUCCESS) { 2839 if (rval != QLA_SUCCESS) {
2831 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2840 ql_dbg(ql_dbg_disc, vha, 0x201d,
2832 "rval=%x\n", vha->host_no, rval)); 2841 "Configure local loop error exit: rval=%x.\n", rval);
2833 } 2842 }
2834 2843
2835 return (rval); 2844 return (rval);
@@ -2858,27 +2867,27 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2858 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 2867 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2859 mb); 2868 mb);
2860 if (rval != QLA_SUCCESS) { 2869 if (rval != QLA_SUCCESS) {
2861 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2870 ql_dbg(ql_dbg_disc, vha, 0x2004,
2862 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2871 "Unable to adjust iIDMA "
2863 vha->host_no, fcport->port_name[0], fcport->port_name[1], 2872 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
2873 "%04x.\n", fcport->port_name[0], fcport->port_name[1],
2864 fcport->port_name[2], fcport->port_name[3], 2874 fcport->port_name[2], fcport->port_name[3],
2865 fcport->port_name[4], fcport->port_name[5], 2875 fcport->port_name[4], fcport->port_name[5],
2866 fcport->port_name[6], fcport->port_name[7], rval, 2876 fcport->port_name[6], fcport->port_name[7], rval,
2867 fcport->fp_speed, mb[0], mb[1])); 2877 fcport->fp_speed, mb[0], mb[1]);
2868 } else { 2878 } else {
2869 link_speed = link_speeds[LS_UNKNOWN]; 2879 link_speed = link_speeds[LS_UNKNOWN];
2870 if (fcport->fp_speed < 5) 2880 if (fcport->fp_speed < 5)
2871 link_speed = link_speeds[fcport->fp_speed]; 2881 link_speed = link_speeds[fcport->fp_speed];
2872 else if (fcport->fp_speed == 0x13) 2882 else if (fcport->fp_speed == 0x13)
2873 link_speed = link_speeds[5]; 2883 link_speed = link_speeds[5];
2874 DEBUG2(qla_printk(KERN_INFO, ha, 2884 ql_dbg(ql_dbg_disc, vha, 0x2005,
2875 "iIDMA adjusted to %s GB/s on " 2885 "iIDMA adjusted to %s GB/s "
2876 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2886 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
2877 link_speed, fcport->port_name[0], 2887 fcport->port_name[0], fcport->port_name[1],
2878 fcport->port_name[1], fcport->port_name[2], 2888 fcport->port_name[2], fcport->port_name[3],
2879 fcport->port_name[3], fcport->port_name[4], 2889 fcport->port_name[4], fcport->port_name[5],
2880 fcport->port_name[5], fcport->port_name[6], 2890 fcport->port_name[6], fcport->port_name[7]);
2881 fcport->port_name[7]));
2882 } 2891 }
2883} 2892}
2884 2893
@@ -2887,7 +2896,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2887{ 2896{
2888 struct fc_rport_identifiers rport_ids; 2897 struct fc_rport_identifiers rport_ids;
2889 struct fc_rport *rport; 2898 struct fc_rport *rport;
2890 struct qla_hw_data *ha = vha->hw;
2891 unsigned long flags; 2899 unsigned long flags;
2892 2900
2893 qla2x00_rport_del(fcport); 2901 qla2x00_rport_del(fcport);
@@ -2899,8 +2907,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2899 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2907 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2900 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); 2908 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2901 if (!rport) { 2909 if (!rport) {
2902 qla_printk(KERN_WARNING, ha, 2910 ql_log(ql_log_warn, vha, 0x2006,
2903 "Unable to allocate fc remote port!\n"); 2911 "Unable to allocate fc remote port.\n");
2904 return; 2912 return;
2905 } 2913 }
2906 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2914 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -2975,8 +2983,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2975 loop_id = SNS_FL_PORT; 2983 loop_id = SNS_FL_PORT;
2976 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); 2984 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2977 if (rval != QLA_SUCCESS) { 2985 if (rval != QLA_SUCCESS) {
2978 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2986 ql_dbg(ql_dbg_disc, vha, 0x201f,
2979 "Port\n", vha->host_no)); 2987 "MBX_GET_PORT_NAME failed, No FL Port.\n");
2980 2988
2981 vha->device_flags &= ~SWITCH_FOUND; 2989 vha->device_flags &= ~SWITCH_FOUND;
2982 return (QLA_SUCCESS); 2990 return (QLA_SUCCESS);
@@ -3003,32 +3011,32 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3003 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 3011 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
3004 0xfc, mb, BIT_1 | BIT_0); 3012 0xfc, mb, BIT_1 | BIT_0);
3005 if (mb[0] != MBS_COMMAND_COMPLETE) { 3013 if (mb[0] != MBS_COMMAND_COMPLETE) {
3006 DEBUG2(qla_printk(KERN_INFO, ha, 3014 ql_dbg(ql_dbg_disc, vha, 0x2042,
3007 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 3015 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
3008 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id, 3016 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
3009 mb[0], mb[1], mb[2], mb[6], mb[7])); 3017 mb[2], mb[6], mb[7]);
3010 return (QLA_SUCCESS); 3018 return (QLA_SUCCESS);
3011 } 3019 }
3012 3020
3013 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { 3021 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
3014 if (qla2x00_rft_id(vha)) { 3022 if (qla2x00_rft_id(vha)) {
3015 /* EMPTY */ 3023 /* EMPTY */
3016 DEBUG2(printk("scsi(%ld): Register FC-4 " 3024 ql_dbg(ql_dbg_disc, vha, 0x2045,
3017 "TYPE failed.\n", vha->host_no)); 3025 "Register FC-4 TYPE failed.\n");
3018 } 3026 }
3019 if (qla2x00_rff_id(vha)) { 3027 if (qla2x00_rff_id(vha)) {
3020 /* EMPTY */ 3028 /* EMPTY */
3021 DEBUG2(printk("scsi(%ld): Register FC-4 " 3029 ql_dbg(ql_dbg_disc, vha, 0x2049,
3022 "Features failed.\n", vha->host_no)); 3030 "Register FC-4 Features failed.\n");
3023 } 3031 }
3024 if (qla2x00_rnn_id(vha)) { 3032 if (qla2x00_rnn_id(vha)) {
3025 /* EMPTY */ 3033 /* EMPTY */
3026 DEBUG2(printk("scsi(%ld): Register Node Name " 3034 ql_dbg(ql_dbg_disc, vha, 0x204f,
3027 "failed.\n", vha->host_no)); 3035 "Register Node Name failed.\n");
3028 } else if (qla2x00_rsnn_nn(vha)) { 3036 } else if (qla2x00_rsnn_nn(vha)) {
3029 /* EMPTY */ 3037 /* EMPTY */
3030 DEBUG2(printk("scsi(%ld): Register Symbolic " 3038 ql_dbg(ql_dbg_disc, vha, 0x2053,
3031 "Node Name failed.\n", vha->host_no)); 3039 "Register Symobilic Node Name failed.\n");
3032 } 3040 }
3033 } 3041 }
3034 3042
@@ -3132,8 +3140,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3132 } 3140 }
3133 3141
3134 if (rval) { 3142 if (rval) {
3135 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 3143 ql_dbg(ql_dbg_disc, vha, 0x2068,
3136 "rval=%d\n", vha->host_no, rval)); 3144 "Configure fabric error exit rval=%d.\n", rval);
3137 } 3145 }
3138 3146
3139 return (rval); 3147 return (rval);
@@ -3175,8 +3183,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3175 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); 3183 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3176 if (!swl) { 3184 if (!swl) {
3177 /*EMPTY*/ 3185 /*EMPTY*/
3178 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 3186 ql_dbg(ql_dbg_disc, vha, 0x2054,
3179 "on GA_NXT\n", vha->host_no)); 3187 "GID_PT allocations failed, fallback on GA_NXT.\n");
3180 } else { 3188 } else {
3181 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 3189 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3182 kfree(swl); 3190 kfree(swl);
@@ -3201,6 +3209,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3201 /* Allocate temporary fcport for any new fcports discovered. */ 3209 /* Allocate temporary fcport for any new fcports discovered. */
3202 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3210 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3203 if (new_fcport == NULL) { 3211 if (new_fcport == NULL) {
3212 ql_log(ql_log_warn, vha, 0x205e,
3213 "Failed to allocate memory for fcport.\n");
3204 kfree(swl); 3214 kfree(swl);
3205 return (QLA_MEMORY_ALLOC_FAILED); 3215 return (QLA_MEMORY_ALLOC_FAILED);
3206 } 3216 }
@@ -3247,9 +3257,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3247 /* Send GA_NXT to the switch */ 3257 /* Send GA_NXT to the switch */
3248 rval = qla2x00_ga_nxt(vha, new_fcport); 3258 rval = qla2x00_ga_nxt(vha, new_fcport);
3249 if (rval != QLA_SUCCESS) { 3259 if (rval != QLA_SUCCESS) {
3250 qla_printk(KERN_WARNING, ha, 3260 ql_log(ql_log_warn, vha, 0x2064,
3251 "SNS scan failed -- assuming zero-entry " 3261 "SNS scan failed -- assuming "
3252 "result...\n"); 3262 "zero-entry result.\n");
3253 list_for_each_entry_safe(fcport, fcptemp, 3263 list_for_each_entry_safe(fcport, fcptemp,
3254 new_fcports, list) { 3264 new_fcports, list) {
3255 list_del(&fcport->list); 3265 list_del(&fcport->list);
@@ -3265,9 +3275,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3265 wrap.b24 = new_fcport->d_id.b24; 3275 wrap.b24 = new_fcport->d_id.b24;
3266 first_dev = 0; 3276 first_dev = 0;
3267 } else if (new_fcport->d_id.b24 == wrap.b24) { 3277 } else if (new_fcport->d_id.b24 == wrap.b24) {
3268 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 3278 ql_dbg(ql_dbg_disc, vha, 0x2065,
3269 vha->host_no, new_fcport->d_id.b.domain, 3279 "Device wrap (%02x%02x%02x).\n",
3270 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 3280 new_fcport->d_id.b.domain,
3281 new_fcport->d_id.b.area,
3282 new_fcport->d_id.b.al_pa);
3271 break; 3283 break;
3272 } 3284 }
3273 3285
@@ -3372,6 +3384,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3372 nxt_d_id.b24 = new_fcport->d_id.b24; 3384 nxt_d_id.b24 = new_fcport->d_id.b24;
3373 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3385 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3374 if (new_fcport == NULL) { 3386 if (new_fcport == NULL) {
3387 ql_log(ql_log_warn, vha, 0x2066,
3388 "Memory allocation failed for fcport.\n");
3375 kfree(swl); 3389 kfree(swl);
3376 return (QLA_MEMORY_ALLOC_FAILED); 3390 return (QLA_MEMORY_ALLOC_FAILED);
3377 } 3391 }
@@ -3501,10 +3515,10 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
3501 d_id.b.area = MSB(LSW(rscn_entry)); 3515 d_id.b.area = MSB(LSW(rscn_entry));
3502 d_id.b.al_pa = LSB(LSW(rscn_entry)); 3516 d_id.b.al_pa = LSB(LSW(rscn_entry));
3503 3517
3504 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 3518 ql_dbg(ql_dbg_disc, vha, 0x2020,
3505 "[%02x/%02x%02x%02x].\n", 3519 "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
3506 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain, 3520 vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
3507 d_id.b.area, d_id.b.al_pa)); 3521 d_id.b.al_pa);
3508 3522
3509 vha->rscn_out_ptr++; 3523 vha->rscn_out_ptr++;
3510 if (vha->rscn_out_ptr == MAX_RSCN_COUNT) 3524 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
@@ -3520,17 +3534,17 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
3520 if (rscn_entry != vha->rscn_queue[rscn_out_iter]) 3534 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3521 break; 3535 break;
3522 3536
3523 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 3537 ql_dbg(ql_dbg_disc, vha, 0x2021,
3524 "entry found at [%d].\n", vha->host_no, 3538 "Skipping duplicate RSCN queue entry found at "
3525 rscn_out_iter)); 3539 "[%d].\n", rscn_out_iter);
3526 3540
3527 vha->rscn_out_ptr = rscn_out_iter; 3541 vha->rscn_out_ptr = rscn_out_iter;
3528 } 3542 }
3529 3543
3530 /* Queue overflow, set switch default case. */ 3544 /* Queue overflow, set switch default case. */
3531 if (vha->flags.rscn_queue_overflow) { 3545 if (vha->flags.rscn_queue_overflow) {
3532 DEBUG(printk("scsi(%ld): device_resync: rscn " 3546 ql_dbg(ql_dbg_disc, vha, 0x2022,
3533 "overflow.\n", vha->host_no)); 3547 "device_resync: rscn overflow.\n");
3534 3548
3535 format = 3; 3549 format = 3;
3536 vha->flags.rscn_queue_overflow = 0; 3550 vha->flags.rscn_queue_overflow = 0;
@@ -3659,10 +3673,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3659 tmp_loopid = 0; 3673 tmp_loopid = 0;
3660 3674
3661 for (;;) { 3675 for (;;) {
3662 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3676 ql_dbg(ql_dbg_disc, vha, 0x2000,
3663 "for port %02x%02x%02x.\n", 3677 "Trying Fabric Login w/loop id 0x%04x for port "
3664 vha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3678 "%02x%02x%02x.\n",
3665 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3679 fcport->loop_id, fcport->d_id.b.domain,
3680 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3666 3681
3667 /* Login fcport on switch. */ 3682 /* Login fcport on switch. */
3668 ha->isp_ops->fabric_login(vha, fcport->loop_id, 3683 ha->isp_ops->fabric_login(vha, fcport->loop_id,
@@ -3680,10 +3695,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3680 tmp_loopid = fcport->loop_id; 3695 tmp_loopid = fcport->loop_id;
3681 fcport->loop_id = mb[1]; 3696 fcport->loop_id = mb[1];
3682 3697
3683 DEBUG(printk("Fabric Login: port in use - next " 3698 ql_dbg(ql_dbg_disc, vha, 0x2001,
3684 "loop id=0x%04x, port Id=%02x%02x%02x.\n", 3699 "Fabric Login: port in use - next loop "
3700 "id=0x%04x, port id= %02x%02x%02x.\n",
3685 fcport->loop_id, fcport->d_id.b.domain, 3701 fcport->loop_id, fcport->d_id.b.domain,
3686 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3702 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3687 3703
3688 } else if (mb[0] == MBS_COMMAND_COMPLETE) { 3704 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3689 /* 3705 /*
@@ -3744,11 +3760,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3744 /* 3760 /*
3745 * unrecoverable / not handled error 3761 * unrecoverable / not handled error
3746 */ 3762 */
3747 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3763 ql_dbg(ql_dbg_disc, vha, 0x2002,
3748 "loop_id=%x jiffies=%lx.\n", 3764 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
3749 __func__, vha->host_no, mb[0], 3765 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
3750 fcport->d_id.b.domain, fcport->d_id.b.area, 3766 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3751 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3767 fcport->loop_id, jiffies);
3752 3768
3753 *next_loopid = fcport->loop_id; 3769 *next_loopid = fcport->loop_id;
3754 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3770 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3852,7 +3868,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3852 return (QLA_FUNCTION_FAILED); 3868 return (QLA_FUNCTION_FAILED);
3853 3869
3854 if (rval) 3870 if (rval)
3855 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3871 ql_dbg(ql_dbg_disc, vha, 0x206c,
3872 "%s *** FAILED ***.\n", __func__);
3856 3873
3857 return (rval); 3874 return (rval);
3858} 3875}
@@ -3929,8 +3946,8 @@ qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
3929 struct qla_hw_data *ha = vha->hw; 3946 struct qla_hw_data *ha = vha->hw;
3930 struct scsi_qla_host *vp; 3947 struct scsi_qla_host *vp;
3931 3948
3932 qla_printk(KERN_INFO, ha, 3949 ql_dbg(ql_dbg_p3p, vha, 0xb002,
3933 "Performing ISP error recovery - ha= %p.\n", ha); 3950 "Performing ISP error recovery - ha=%p.\n", ha);
3934 3951
3935 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 3952 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3936 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3953 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -3964,8 +3981,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3964 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3981 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3965 ha->qla_stats.total_isp_aborts++; 3982 ha->qla_stats.total_isp_aborts++;
3966 3983
3967 qla_printk(KERN_INFO, ha, 3984 ql_log(ql_log_info, vha, 0x00af,
3968 "Performing ISP error recovery - ha= %p.\n", ha); 3985 "Performing ISP error recovery - ha=%p.\n", ha);
3969 3986
3970 /* For ISP82XX, reset_chip is just disabling interrupts. 3987 /* For ISP82XX, reset_chip is just disabling interrupts.
3971 * Driver waits for the completion of the commands. 3988 * Driver waits for the completion of the commands.
@@ -4016,6 +4033,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4016 /* Make sure for ISP 82XX IO DMA is complete */ 4033 /* Make sure for ISP 82XX IO DMA is complete */
4017 if (IS_QLA82XX(ha)) { 4034 if (IS_QLA82XX(ha)) {
4018 qla82xx_chip_reset_cleanup(vha); 4035 qla82xx_chip_reset_cleanup(vha);
4036 ql_log(ql_log_info, vha, 0x00b4,
4037 "Done chip reset cleanup.\n");
4019 4038
4020 /* Done waiting for pending commands. 4039 /* Done waiting for pending commands.
4021 * Reset the online flag. 4040 * Reset the online flag.
@@ -4097,7 +4116,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4097 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 4116 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
4098 &ha->fce_bufs); 4117 &ha->fce_bufs);
4099 if (rval) { 4118 if (rval) {
4100 qla_printk(KERN_WARNING, ha, 4119 ql_log(ql_log_warn, vha, 0x8033,
4101 "Unable to reinitialize FCE " 4120 "Unable to reinitialize FCE "
4102 "(%d).\n", rval); 4121 "(%d).\n", rval);
4103 ha->flags.fce_enabled = 0; 4122 ha->flags.fce_enabled = 0;
@@ -4109,7 +4128,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4109 rval = qla2x00_enable_eft_trace(vha, 4128 rval = qla2x00_enable_eft_trace(vha,
4110 ha->eft_dma, EFT_NUM_BUFFERS); 4129 ha->eft_dma, EFT_NUM_BUFFERS);
4111 if (rval) { 4130 if (rval) {
4112 qla_printk(KERN_WARNING, ha, 4131 ql_log(ql_log_warn, vha, 0x8034,
4113 "Unable to reinitialize EFT " 4132 "Unable to reinitialize EFT "
4114 "(%d).\n", rval); 4133 "(%d).\n", rval);
4115 } 4134 }
@@ -4118,9 +4137,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4118 vha->flags.online = 1; 4137 vha->flags.online = 1;
4119 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 4138 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
4120 if (ha->isp_abort_cnt == 0) { 4139 if (ha->isp_abort_cnt == 0) {
4121 qla_printk(KERN_WARNING, ha, 4140 ql_log(ql_log_fatal, vha, 0x8035,
4122 "ISP error recovery failed - " 4141 "ISP error recover failed - "
4123 "board disabled\n"); 4142 "board disabled.\n");
4124 /* 4143 /*
4125 * The next call disables the board 4144 * The next call disables the board
4126 * completely. 4145 * completely.
@@ -4132,16 +4151,16 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4132 status = 0; 4151 status = 0;
4133 } else { /* schedule another ISP abort */ 4152 } else { /* schedule another ISP abort */
4134 ha->isp_abort_cnt--; 4153 ha->isp_abort_cnt--;
4135 DEBUG(printk("qla%ld: ISP abort - " 4154 ql_dbg(ql_dbg_taskm, vha, 0x8020,
4136 "retry remaining %d\n", 4155 "ISP abort - retry remaining %d.\n",
4137 vha->host_no, ha->isp_abort_cnt)); 4156 ha->isp_abort_cnt);
4138 status = 1; 4157 status = 1;
4139 } 4158 }
4140 } else { 4159 } else {
4141 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 4160 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
4142 DEBUG(printk("qla2x00(%ld): ISP error recovery " 4161 ql_dbg(ql_dbg_taskm, vha, 0x8021,
4143 "- retrying (%d) more times\n", 4162 "ISP error recovery - retrying (%d) "
4144 vha->host_no, ha->isp_abort_cnt)); 4163 "more times.\n", ha->isp_abort_cnt);
4145 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 4164 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4146 status = 1; 4165 status = 1;
4147 } 4166 }
@@ -4150,9 +4169,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4150 } 4169 }
4151 4170
4152 if (!status) { 4171 if (!status) {
4153 DEBUG(printk(KERN_INFO 4172 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
4154 "qla2x00_abort_isp(%ld): succeeded.\n",
4155 vha->host_no));
4156 4173
4157 spin_lock_irqsave(&ha->vport_slock, flags); 4174 spin_lock_irqsave(&ha->vport_slock, flags);
4158 list_for_each_entry(vp, &ha->vp_list, list) { 4175 list_for_each_entry(vp, &ha->vp_list, list) {
@@ -4169,8 +4186,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4169 spin_unlock_irqrestore(&ha->vport_slock, flags); 4186 spin_unlock_irqrestore(&ha->vport_slock, flags);
4170 4187
4171 } else { 4188 } else {
4172 qla_printk(KERN_INFO, ha, 4189 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n");
4173 "qla2x00_abort_isp: **** FAILED ****\n");
4174 } 4190 }
4175 4191
4176 return(status); 4192 return(status);
@@ -4211,8 +4227,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4211 4227
4212 status = qla2x00_fw_ready(vha); 4228 status = qla2x00_fw_ready(vha);
4213 if (!status) { 4229 if (!status) {
4214 DEBUG(printk("%s(): Start configure loop, " 4230 ql_dbg(ql_dbg_taskm, vha, 0x8031,
4215 "status = %d\n", __func__, status)); 4231 "Start configure loop status = %d.\n", status);
4216 4232
4217 /* Issue a marker after FW becomes ready. */ 4233 /* Issue a marker after FW becomes ready. */
4218 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4234 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4234,9 +4250,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4234 if ((vha->device_flags & DFLG_NO_CABLE)) 4250 if ((vha->device_flags & DFLG_NO_CABLE))
4235 status = 0; 4251 status = 0;
4236 4252
4237 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 4253 ql_dbg(ql_dbg_taskm, vha, 0x8032,
4238 __func__, 4254 "Configure loop done, status = 0x%x.\n", status);
4239 status));
4240 } 4255 }
4241 return (status); 4256 return (status);
4242} 4257}
@@ -4256,13 +4271,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4256 rsp->options &= ~BIT_0; 4271 rsp->options &= ~BIT_0;
4257 ret = qla25xx_init_rsp_que(base_vha, rsp); 4272 ret = qla25xx_init_rsp_que(base_vha, rsp);
4258 if (ret != QLA_SUCCESS) 4273 if (ret != QLA_SUCCESS)
4259 DEBUG2_17(printk(KERN_WARNING 4274 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
4260 "%s Rsp que:%d init failed\n", __func__, 4275 "%s Rsp que: %d init failed.\n",
4261 rsp->id)); 4276 __func__, rsp->id);
4262 else 4277 else
4263 DEBUG2_17(printk(KERN_INFO 4278 ql_dbg(ql_dbg_init, base_vha, 0x0100,
4264 "%s Rsp que:%d inited\n", __func__, 4279 "%s Rsp que: %d inited.\n",
4265 rsp->id)); 4280 __func__, rsp->id);
4266 } 4281 }
4267 } 4282 }
4268 for (i = 1; i < ha->max_req_queues; i++) { 4283 for (i = 1; i < ha->max_req_queues; i++) {
@@ -4272,13 +4287,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4272 req->options &= ~BIT_0; 4287 req->options &= ~BIT_0;
4273 ret = qla25xx_init_req_que(base_vha, req); 4288 ret = qla25xx_init_req_que(base_vha, req);
4274 if (ret != QLA_SUCCESS) 4289 if (ret != QLA_SUCCESS)
4275 DEBUG2_17(printk(KERN_WARNING 4290 ql_dbg(ql_dbg_init, base_vha, 0x0101,
4276 "%s Req que:%d init failed\n", __func__, 4291 "%s Req que: %d init failed.\n",
4277 req->id)); 4292 __func__, req->id);
4278 else 4293 else
4279 DEBUG2_17(printk(KERN_WARNING 4294 ql_dbg(ql_dbg_init, base_vha, 0x0102,
4280 "%s Req que:%d inited\n", __func__, 4295 "%s Req que: %d inited.\n",
4281 req->id)); 4296 __func__, req->id);
4282 } 4297 }
4283 } 4298 }
4284 return ret; 4299 return ret;
@@ -4397,19 +4412,22 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4397 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4412 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4398 chksum += le32_to_cpu(*dptr++); 4413 chksum += le32_to_cpu(*dptr++);
4399 4414
4400 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 4415 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
4401 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4416 "Contents of NVRAM\n");
4417 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
4418 (uint8_t *)nv, ha->nvram_size);
4402 4419
4403 /* Bad NVRAM data, set defaults parameters. */ 4420 /* Bad NVRAM data, set defaults parameters. */
4404 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 4421 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4405 || nv->id[3] != ' ' || 4422 || nv->id[3] != ' ' ||
4406 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 4423 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4407 /* Reset NVRAM data. */ 4424 /* Reset NVRAM data. */
4408 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 4425 ql_log(ql_log_warn, vha, 0x006b,
4409 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 4426 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
4410 le16_to_cpu(nv->nvram_version)); 4427 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4411 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 4428 ql_log(ql_log_warn, vha, 0x006c,
4412 "invalid -- WWPN) defaults.\n"); 4429 "Falling back to functioning (yet invalid -- WWPN) "
4430 "defaults.\n");
4413 4431
4414 /* 4432 /*
4415 * Set default initialization control block. 4433 * Set default initialization control block.
@@ -4587,10 +4605,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4587 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4605 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4588 ha->zio_mode = QLA_ZIO_MODE_6; 4606 ha->zio_mode = QLA_ZIO_MODE_6;
4589 4607
4590 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 4608 ql_log(ql_log_info, vha, 0x006f,
4591 "(%d us).\n", vha->host_no, ha->zio_mode,
4592 ha->zio_timer * 100));
4593 qla_printk(KERN_INFO, ha,
4594 "ZIO mode %d enabled; timer delay (%d us).\n", 4609 "ZIO mode %d enabled; timer delay (%d us).\n",
4595 ha->zio_mode, ha->zio_timer * 100); 4610 ha->zio_mode, ha->zio_timer * 100);
4596 4611
@@ -4601,8 +4616,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4601 } 4616 }
4602 4617
4603 if (rval) { 4618 if (rval) {
4604 DEBUG2_3(printk(KERN_WARNING 4619 ql_log(ql_log_warn, vha, 0x0070,
4605 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 4620 "NVRAM configuration failed.\n");
4606 } 4621 }
4607 return (rval); 4622 return (rval);
4608} 4623}
@@ -4620,8 +4635,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4620 struct qla_hw_data *ha = vha->hw; 4635 struct qla_hw_data *ha = vha->hw;
4621 struct req_que *req = ha->req_q_map[0]; 4636 struct req_que *req = ha->req_q_map[0];
4622 4637
4623 qla_printk(KERN_INFO, ha, 4638 ql_dbg(ql_dbg_init, vha, 0x008b,
4624 "FW: Loading from flash (%x)...\n", faddr); 4639 "Loading firmware from flash (%x).\n", faddr);
4625 4640
4626 rval = QLA_SUCCESS; 4641 rval = QLA_SUCCESS;
4627 4642
@@ -4637,11 +4652,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4637 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4652 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4638 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4653 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4639 dcode[3] == 0)) { 4654 dcode[3] == 0)) {
4640 qla_printk(KERN_WARNING, ha, 4655 ql_log(ql_log_fatal, vha, 0x008c,
4641 "Unable to verify integrity of flash firmware image!\n"); 4656 "Unable to verify the integrity of flash firmware "
4642 qla_printk(KERN_WARNING, ha, 4657 "image.\n");
4643 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4658 ql_log(ql_log_fatal, vha, 0x008d,
4644 dcode[1], dcode[2], dcode[3]); 4659 "Firmware data: %08x %08x %08x %08x.\n",
4660 dcode[0], dcode[1], dcode[2], dcode[3]);
4645 4661
4646 return QLA_FUNCTION_FAILED; 4662 return QLA_FUNCTION_FAILED;
4647 } 4663 }
@@ -4660,9 +4676,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4660 if (dlen > risc_size) 4676 if (dlen > risc_size)
4661 dlen = risc_size; 4677 dlen = risc_size;
4662 4678
4663 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4679 ql_dbg(ql_dbg_init, vha, 0x008e,
4664 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 4680 "Loading risc segment@ risc addr %x "
4665 vha->host_no, risc_addr, dlen, faddr)); 4681 "number of dwords 0x%x offset 0x%x.\n",
4682 risc_addr, dlen, faddr);
4666 4683
4667 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 4684 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
4668 for (i = 0; i < dlen; i++) 4685 for (i = 0; i < dlen; i++)
@@ -4671,12 +4688,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4671 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4688 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4672 dlen); 4689 dlen);
4673 if (rval) { 4690 if (rval) {
4674 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4691 ql_log(ql_log_fatal, vha, 0x008f,
4675 "segment %d of firmware\n", vha->host_no, 4692 "Failed to load segment %d of firmware.\n",
4676 fragment)); 4693 fragment);
4677 qla_printk(KERN_WARNING, ha,
4678 "[ERROR] Failed to load segment %d of "
4679 "firmware\n", fragment);
4680 break; 4694 break;
4681 } 4695 }
4682 4696
@@ -4709,9 +4723,10 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4709 /* Load firmware blob. */ 4723 /* Load firmware blob. */
4710 blob = qla2x00_request_firmware(vha); 4724 blob = qla2x00_request_firmware(vha);
4711 if (!blob) { 4725 if (!blob) {
4712 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4726 ql_log(ql_log_info, vha, 0x0083,
4713 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4727 "Fimware image unavailable.\n");
4714 "from: " QLA_FW_URL ".\n"); 4728 ql_log(ql_log_info, vha, 0x0084,
4729 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
4715 return QLA_FUNCTION_FAILED; 4730 return QLA_FUNCTION_FAILED;
4716 } 4731 }
4717 4732
@@ -4724,8 +4739,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4724 4739
4725 /* Validate firmware image by checking version. */ 4740 /* Validate firmware image by checking version. */
4726 if (blob->fw->size < 8 * sizeof(uint16_t)) { 4741 if (blob->fw->size < 8 * sizeof(uint16_t)) {
4727 qla_printk(KERN_WARNING, ha, 4742 ql_log(ql_log_fatal, vha, 0x0085,
4728 "Unable to verify integrity of firmware image (%Zd)!\n", 4743 "Unable to verify integrity of firmware image (%Zd).\n",
4729 blob->fw->size); 4744 blob->fw->size);
4730 goto fail_fw_integrity; 4745 goto fail_fw_integrity;
4731 } 4746 }
@@ -4734,11 +4749,11 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4734 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && 4749 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
4735 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && 4750 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
4736 wcode[2] == 0 && wcode[3] == 0)) { 4751 wcode[2] == 0 && wcode[3] == 0)) {
4737 qla_printk(KERN_WARNING, ha, 4752 ql_log(ql_log_fatal, vha, 0x0086,
4738 "Unable to verify integrity of firmware image!\n"); 4753 "Unable to verify integrity of firmware image.\n");
4739 qla_printk(KERN_WARNING, ha, 4754 ql_log(ql_log_fatal, vha, 0x0087,
4740 "Firmware data: %04x %04x %04x %04x!\n", wcode[0], 4755 "Firmware data: %04x %04x %04x %04x.\n",
4741 wcode[1], wcode[2], wcode[3]); 4756 wcode[0], wcode[1], wcode[2], wcode[3]);
4742 goto fail_fw_integrity; 4757 goto fail_fw_integrity;
4743 } 4758 }
4744 4759
@@ -4751,9 +4766,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4751 /* Validate firmware image size. */ 4766 /* Validate firmware image size. */
4752 fwclen += risc_size * sizeof(uint16_t); 4767 fwclen += risc_size * sizeof(uint16_t);
4753 if (blob->fw->size < fwclen) { 4768 if (blob->fw->size < fwclen) {
4754 qla_printk(KERN_WARNING, ha, 4769 ql_log(ql_log_fatal, vha, 0x0088,
4755 "Unable to verify integrity of firmware image " 4770 "Unable to verify integrity of firmware image "
4756 "(%Zd)!\n", blob->fw->size); 4771 "(%Zd).\n", blob->fw->size);
4757 goto fail_fw_integrity; 4772 goto fail_fw_integrity;
4758 } 4773 }
4759 4774
@@ -4762,10 +4777,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4762 wlen = (uint16_t)(ha->fw_transfer_size >> 1); 4777 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4763 if (wlen > risc_size) 4778 if (wlen > risc_size)
4764 wlen = risc_size; 4779 wlen = risc_size;
4765 4780 ql_dbg(ql_dbg_init, vha, 0x0089,
4766 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4781 "Loading risc segment@ risc addr %x number of "
4767 "addr %x, number of words 0x%x.\n", vha->host_no, 4782 "words 0x%x.\n", risc_addr, wlen);
4768 risc_addr, wlen));
4769 4783
4770 for (i = 0; i < wlen; i++) 4784 for (i = 0; i < wlen; i++)
4771 wcode[i] = swab16(fwcode[i]); 4785 wcode[i] = swab16(fwcode[i]);
@@ -4773,12 +4787,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4773 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4787 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4774 wlen); 4788 wlen);
4775 if (rval) { 4789 if (rval) {
4776 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4790 ql_log(ql_log_fatal, vha, 0x008a,
4777 "segment %d of firmware\n", vha->host_no, 4791 "Failed to load segment %d of firmware.\n",
4778 fragment)); 4792 fragment);
4779 qla_printk(KERN_WARNING, ha,
4780 "[ERROR] Failed to load segment %d of "
4781 "firmware\n", fragment);
4782 break; 4793 break;
4783 } 4794 }
4784 4795
@@ -4814,15 +4825,17 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4814 /* Load firmware blob. */ 4825 /* Load firmware blob. */
4815 blob = qla2x00_request_firmware(vha); 4826 blob = qla2x00_request_firmware(vha);
4816 if (!blob) { 4827 if (!blob) {
4817 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4828 ql_log(ql_log_warn, vha, 0x0090,
4818 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4829 "Fimware image unavailable.\n");
4819 "from: " QLA_FW_URL ".\n"); 4830 ql_log(ql_log_warn, vha, 0x0091,
4831 "Firmware images can be retrieved from: "
4832 QLA_FW_URL ".\n");
4820 4833
4821 return QLA_FUNCTION_FAILED; 4834 return QLA_FUNCTION_FAILED;
4822 } 4835 }
4823 4836
4824 qla_printk(KERN_INFO, ha, 4837 ql_log(ql_log_info, vha, 0x0092,
4825 "FW: Loading via request-firmware...\n"); 4838 "Loading via request-firmware.\n");
4826 4839
4827 rval = QLA_SUCCESS; 4840 rval = QLA_SUCCESS;
4828 4841
@@ -4834,8 +4847,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4834 4847
4835 /* Validate firmware image by checking version. */ 4848 /* Validate firmware image by checking version. */
4836 if (blob->fw->size < 8 * sizeof(uint32_t)) { 4849 if (blob->fw->size < 8 * sizeof(uint32_t)) {
4837 qla_printk(KERN_WARNING, ha, 4850 ql_log(ql_log_fatal, vha, 0x0093,
4838 "Unable to verify integrity of firmware image (%Zd)!\n", 4851 "Unable to verify integrity of firmware image (%Zd).\n",
4839 blob->fw->size); 4852 blob->fw->size);
4840 goto fail_fw_integrity; 4853 goto fail_fw_integrity;
4841 } 4854 }
@@ -4845,11 +4858,12 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4845 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4858 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4846 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4859 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4847 dcode[3] == 0)) { 4860 dcode[3] == 0)) {
4848 qla_printk(KERN_WARNING, ha, 4861 ql_log(ql_log_fatal, vha, 0x0094,
4849 "Unable to verify integrity of firmware image!\n"); 4862 "Unable to verify integrity of firmware image (%Zd).\n",
4850 qla_printk(KERN_WARNING, ha, 4863 blob->fw->size);
4851 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4864 ql_log(ql_log_fatal, vha, 0x0095,
4852 dcode[1], dcode[2], dcode[3]); 4865 "Firmware data: %08x %08x %08x %08x.\n",
4866 dcode[0], dcode[1], dcode[2], dcode[3]);
4853 goto fail_fw_integrity; 4867 goto fail_fw_integrity;
4854 } 4868 }
4855 4869
@@ -4861,9 +4875,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4861 /* Validate firmware image size. */ 4875 /* Validate firmware image size. */
4862 fwclen += risc_size * sizeof(uint32_t); 4876 fwclen += risc_size * sizeof(uint32_t);
4863 if (blob->fw->size < fwclen) { 4877 if (blob->fw->size < fwclen) {
4864 qla_printk(KERN_WARNING, ha, 4878 ql_log(ql_log_fatal, vha, 0x0096,
4865 "Unable to verify integrity of firmware image " 4879 "Unable to verify integrity of firmware image "
4866 "(%Zd)!\n", blob->fw->size); 4880 "(%Zd).\n", blob->fw->size);
4867 4881
4868 goto fail_fw_integrity; 4882 goto fail_fw_integrity;
4869 } 4883 }
@@ -4874,9 +4888,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4874 if (dlen > risc_size) 4888 if (dlen > risc_size)
4875 dlen = risc_size; 4889 dlen = risc_size;
4876 4890
4877 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4891 ql_dbg(ql_dbg_init, vha, 0x0097,
4878 "addr %x, number of dwords 0x%x.\n", vha->host_no, 4892 "Loading risc segment@ risc addr %x "
4879 risc_addr, dlen)); 4893 "number of dwords 0x%x.\n", risc_addr, dlen);
4880 4894
4881 for (i = 0; i < dlen; i++) 4895 for (i = 0; i < dlen; i++)
4882 dcode[i] = swab32(fwcode[i]); 4896 dcode[i] = swab32(fwcode[i]);
@@ -4884,12 +4898,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4884 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4898 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4885 dlen); 4899 dlen);
4886 if (rval) { 4900 if (rval) {
4887 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4901 ql_log(ql_log_fatal, vha, 0x0098,
4888 "segment %d of firmware\n", vha->host_no, 4902 "Failed to load segment %d of firmware.\n",
4889 fragment)); 4903 fragment);
4890 qla_printk(KERN_WARNING, ha,
4891 "[ERROR] Failed to load segment %d of "
4892 "firmware\n", fragment);
4893 break; 4904 break;
4894 } 4905 }
4895 4906
@@ -4953,14 +4964,13 @@ try_blob_fw:
4953 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) 4964 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4954 return rval; 4965 return rval;
4955 4966
4956 qla_printk(KERN_ERR, ha, 4967 ql_log(ql_log_info, vha, 0x0099,
4957 "FW: Attempting to fallback to golden firmware...\n"); 4968 "Attempting to fallback to golden firmware.\n");
4958 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); 4969 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4959 if (rval != QLA_SUCCESS) 4970 if (rval != QLA_SUCCESS)
4960 return rval; 4971 return rval;
4961 4972
4962 qla_printk(KERN_ERR, ha, 4973 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
4963 "FW: Please update operational firmware...\n");
4964 ha->flags.running_gold_fw = 1; 4974 ha->flags.running_gold_fw = 1;
4965 4975
4966 return rval; 4976 return rval;
@@ -4987,8 +4997,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4987 continue; 4997 continue;
4988 if (qla2x00_setup_chip(vha) != QLA_SUCCESS) 4998 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4989 continue; 4999 continue;
4990 qla_printk(KERN_INFO, ha, 5000 ql_log(ql_log_info, vha, 0x8015,
4991 "Attempting retry of stop-firmware command...\n"); 5001 "Attempting retry of stop-firmware command.\n");
4992 ret = qla2x00_stop_firmware(vha); 5002 ret = qla2x00_stop_firmware(vha);
4993 } 5003 }
4994} 5004}
@@ -5023,10 +5033,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
5023 /* Login to SNS first */ 5033 /* Login to SNS first */
5024 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); 5034 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
5025 if (mb[0] != MBS_COMMAND_COMPLETE) { 5035 if (mb[0] != MBS_COMMAND_COMPLETE) {
5026 DEBUG15(qla_printk(KERN_INFO, ha, 5036 ql_dbg(ql_dbg_init, vha, 0x0103,
5027 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 5037 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
5028 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS, 5038 "mb[6]=%x mb[7]=%x.\n",
5029 mb[0], mb[1], mb[2], mb[6], mb[7])); 5039 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
5030 return (QLA_FUNCTION_FAILED); 5040 return (QLA_FUNCTION_FAILED);
5031 } 5041 }
5032 5042
@@ -5146,19 +5156,23 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5146 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 5156 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
5147 chksum += le32_to_cpu(*dptr++); 5157 chksum += le32_to_cpu(*dptr++);
5148 5158
5149 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 5159 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
5150 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 5160 "Contents of NVRAM:\n");
5161 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
5162 (uint8_t *)nv, ha->nvram_size);
5151 5163
5152 /* Bad NVRAM data, set defaults parameters. */ 5164 /* Bad NVRAM data, set defaults parameters. */
5153 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 5165 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5154 || nv->id[3] != ' ' || 5166 || nv->id[3] != ' ' ||
5155 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 5167 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5156 /* Reset NVRAM data. */ 5168 /* Reset NVRAM data. */
5157 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 5169 ql_log(ql_log_info, vha, 0x0073,
5158 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 5170 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
5171 "version=0x%x.\n", chksum, nv->id[0],
5159 le16_to_cpu(nv->nvram_version)); 5172 le16_to_cpu(nv->nvram_version));
5160 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 5173 ql_log(ql_log_info, vha, 0x0074,
5161 "invalid -- WWPN) defaults.\n"); 5174 "Falling back to functioning (yet invalid -- WWPN) "
5175 "defaults.\n");
5162 5176
5163 /* 5177 /*
5164 * Set default initialization control block. 5178 * Set default initialization control block.
@@ -5350,12 +5364,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5350 if (ha->zio_mode != QLA_ZIO_DISABLED) { 5364 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5351 ha->zio_mode = QLA_ZIO_MODE_6; 5365 ha->zio_mode = QLA_ZIO_MODE_6;
5352 5366
5353 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 5367 ql_log(ql_log_info, vha, 0x0075,
5354 "(%d us).\n", vha->host_no, ha->zio_mode,
5355 ha->zio_timer * 100));
5356 qla_printk(KERN_INFO, ha,
5357 "ZIO mode %d enabled; timer delay (%d us).\n", 5368 "ZIO mode %d enabled; timer delay (%d us).\n",
5358 ha->zio_mode, ha->zio_timer * 100); 5369 ha->zio_mode,
5370 ha->zio_timer * 100);
5359 5371
5360 icb->firmware_options_2 |= cpu_to_le32( 5372 icb->firmware_options_2 |= cpu_to_le32(
5361 (uint32_t)ha->zio_mode); 5373 (uint32_t)ha->zio_mode);
@@ -5364,8 +5376,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5364 } 5376 }
5365 5377
5366 if (rval) { 5378 if (rval) {
5367 DEBUG2_3(printk(KERN_WARNING 5379 ql_log(ql_log_warn, vha, 0x0076,
5368 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 5380 "NVRAM configuration failed.\n");
5369 } 5381 }
5370 return (rval); 5382 return (rval);
5371} 5383}
@@ -5388,9 +5400,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5388 5400
5389 status = qla2x00_fw_ready(vha); 5401 status = qla2x00_fw_ready(vha);
5390 if (!status) { 5402 if (!status) {
5391 qla_printk(KERN_INFO, ha, 5403 ql_log(ql_log_info, vha, 0x803c,
5392 "%s(): Start configure loop, " 5404 "Start configure loop, status =%d.\n", status);
5393 "status = %d\n", __func__, status);
5394 5405
5395 /* Issue a marker after FW becomes ready. */ 5406 /* Issue a marker after FW becomes ready. */
5396 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 5407 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -5412,9 +5423,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5412 if ((vha->device_flags & DFLG_NO_CABLE)) 5423 if ((vha->device_flags & DFLG_NO_CABLE))
5413 status = 0; 5424 status = 0;
5414 5425
5415 qla_printk(KERN_INFO, ha, 5426 ql_log(ql_log_info, vha, 0x803d,
5416 "%s(): Configure loop done, status = 0x%x\n", 5427 "Configure loop done, status = 0x%x.\n", status);
5417 __func__, status);
5418 } 5428 }
5419 5429
5420 if (!status) { 5430 if (!status) {
@@ -5450,9 +5460,9 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5450 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 5460 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5451 &ha->fce_bufs); 5461 &ha->fce_bufs);
5452 if (rval) { 5462 if (rval) {
5453 qla_printk(KERN_WARNING, ha, 5463 ql_log(ql_log_warn, vha, 0x803e,
5454 "Unable to reinitialize FCE " 5464 "Unable to reinitialize FCE (%d).\n",
5455 "(%d).\n", rval); 5465 rval);
5456 ha->flags.fce_enabled = 0; 5466 ha->flags.fce_enabled = 0;
5457 } 5467 }
5458 } 5468 }
@@ -5462,17 +5472,16 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5462 rval = qla2x00_enable_eft_trace(vha, 5472 rval = qla2x00_enable_eft_trace(vha,
5463 ha->eft_dma, EFT_NUM_BUFFERS); 5473 ha->eft_dma, EFT_NUM_BUFFERS);
5464 if (rval) { 5474 if (rval) {
5465 qla_printk(KERN_WARNING, ha, 5475 ql_log(ql_log_warn, vha, 0x803f,
5466 "Unable to reinitialize EFT " 5476 "Unable to reinitialize EFT (%d).\n",
5467 "(%d).\n", rval); 5477 rval);
5468 } 5478 }
5469 } 5479 }
5470 } 5480 }
5471 5481
5472 if (!status) { 5482 if (!status) {
5473 DEBUG(printk(KERN_INFO 5483 ql_dbg(ql_dbg_taskm, vha, 0x8040,
5474 "qla82xx_restart_isp(%ld): succeeded.\n", 5484 "qla82xx_restart_isp succeeded.\n");
5475 vha->host_no));
5476 5485
5477 spin_lock_irqsave(&ha->vport_slock, flags); 5486 spin_lock_irqsave(&ha->vport_slock, flags);
5478 list_for_each_entry(vp, &ha->vp_list, list) { 5487 list_for_each_entry(vp, &ha->vp_list, list) {
@@ -5489,8 +5498,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5489 spin_unlock_irqrestore(&ha->vport_slock, flags); 5498 spin_unlock_irqrestore(&ha->vport_slock, flags);
5490 5499
5491 } else { 5500 } else {
5492 qla_printk(KERN_INFO, ha, 5501 ql_log(ql_log_warn, vha, 0x8041,
5493 "qla82xx_restart_isp: **** FAILED ****\n"); 5502 "qla82xx_restart_isp **** FAILED ****.\n");
5494 } 5503 }
5495 5504
5496 return status; 5505 return status;
@@ -5640,9 +5649,8 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5640 if (ret == QLA_SUCCESS) 5649 if (ret == QLA_SUCCESS)
5641 fcport->fcp_prio = priority; 5650 fcport->fcp_prio = priority;
5642 else 5651 else
5643 DEBUG2(printk(KERN_WARNING 5652 ql_dbg(ql_dbg_user, vha, 0x704f,
5644 "scsi(%ld): Unable to activate fcp priority, " 5653 "Unable to activate fcp priority, ret=0x%x.\n", ret);
5645 " ret=0x%x\n", vha->host_no, ret));
5646 5654
5647 return ret; 5655 return ret;
5648} 5656}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4c8167e11f69..d2e904bc21c0 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -94,11 +94,11 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
94 94
95 /* Don't print state transitions during initial allocation of fcport */ 95 /* Don't print state transitions during initial allocation of fcport */
96 if (old_state && old_state != state) { 96 if (old_state && old_state != state) {
97 DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw, 97 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
98 "scsi(%ld): FCPort state transitioned from %s to %s - " 98 "FCPort state transitioned from %s to %s - "
99 "portid=%02x%02x%02x.\n", fcport->vha->host_no, 99 "portid=%02x%02x%02x.\n",
100 port_state_str[old_state], port_state_str[state], 100 port_state_str[old_state], port_state_str[state],
101 fcport->d_id.b.domain, fcport->d_id.b.area, 101 fcport->d_id.b.domain, fcport->d_id.b.area,
102 fcport->d_id.b.al_pa)); 102 fcport->d_id.b.al_pa);
103 } 103 }
104} 104}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 7bac3cd109d6..49d6906af886 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -150,7 +150,8 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
150 150
151 /* We only support T10 DIF right now */ 151 /* We only support T10 DIF right now */
152 if (guard != SHOST_DIX_GUARD_CRC) { 152 if (guard != SHOST_DIX_GUARD_CRC) {
153 DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard)); 153 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
154 "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
154 return 0; 155 return 0;
155 } 156 }
156 157
@@ -343,9 +344,10 @@ qla2x00_start_scsi(srb_t *sp)
343 344
344 /* Send marker if required */ 345 /* Send marker if required */
345 if (vha->marker_needed != 0) { 346 if (vha->marker_needed != 0) {
346 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) 347 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
347 != QLA_SUCCESS) 348 QLA_SUCCESS) {
348 return (QLA_FUNCTION_FAILED); 349 return (QLA_FUNCTION_FAILED);
350 }
349 vha->marker_needed = 0; 351 vha->marker_needed = 0;
350 } 352 }
351 353
@@ -490,8 +492,8 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
490 mrk24 = NULL; 492 mrk24 = NULL;
491 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0); 493 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
492 if (mrk == NULL) { 494 if (mrk == NULL) {
493 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 495 ql_log(ql_log_warn, base_vha, 0x3026,
494 __func__, base_vha->host_no)); 496 "Failed to allocate Marker IOCB.\n");
495 497
496 return (QLA_FUNCTION_FAILED); 498 return (QLA_FUNCTION_FAILED);
497 } 499 }
@@ -547,9 +549,10 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
547 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 549 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
548 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; 550 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
549 551
550 DEBUG5(printk("%s(): IOCB data:\n", __func__)); 552 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
551 DEBUG5(qla2x00_dump_buffer( 553 "IOCB data:\n");
552 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE)); 554 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
555 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
553 556
554 /* Adjust ring index. */ 557 /* Adjust ring index. */
555 req->ring_index++; 558 req->ring_index++;
@@ -604,7 +607,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
604 * Returns the number of IOCB entries needed to store @dsds. 607 * Returns the number of IOCB entries needed to store @dsds.
605 */ 608 */
606inline uint16_t 609inline uint16_t
607qla24xx_calc_iocbs(uint16_t dsds) 610qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
608{ 611{
609 uint16_t iocbs; 612 uint16_t iocbs;
610 613
@@ -614,8 +617,6 @@ qla24xx_calc_iocbs(uint16_t dsds)
614 if ((dsds - 1) % 5) 617 if ((dsds - 1) % 5)
615 iocbs++; 618 iocbs++;
616 } 619 }
617 DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
618 __func__, iocbs));
619 return iocbs; 620 return iocbs;
620} 621}
621 622
@@ -712,6 +713,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
712 unsigned int protcnt) 713 unsigned int protcnt)
713{ 714{
714 struct sd_dif_tuple *spt; 715 struct sd_dif_tuple *spt;
716 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
715 unsigned char op = scsi_get_prot_op(cmd); 717 unsigned char op = scsi_get_prot_op(cmd);
716 718
717 switch (scsi_get_prot_type(cmd)) { 719 switch (scsi_get_prot_type(cmd)) {
@@ -768,9 +770,9 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
768 op == SCSI_PROT_WRITE_PASS)) { 770 op == SCSI_PROT_WRITE_PASS)) {
769 spt = page_address(sg_page(scsi_prot_sglist(cmd))) + 771 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
770 scsi_prot_sglist(cmd)[0].offset; 772 scsi_prot_sglist(cmd)[0].offset;
771 DEBUG18(printk(KERN_DEBUG 773 ql_dbg(ql_dbg_io, vha, 0x3008,
772 "%s(): LBA from user %p, lba = 0x%x\n", 774 "LBA from user %p, lba = 0x%x for cmd=%p.\n",
773 __func__, spt, (int)spt->ref_tag)); 775 spt, (int)spt->ref_tag, cmd);
774 pkt->ref_tag = swab32(spt->ref_tag); 776 pkt->ref_tag = swab32(spt->ref_tag);
775 pkt->app_tag_mask[0] = 0x0; 777 pkt->app_tag_mask[0] = 0x0;
776 pkt->app_tag_mask[1] = 0x0; 778 pkt->app_tag_mask[1] = 0x0;
@@ -789,11 +791,11 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
789 break; 791 break;
790 } 792 }
791 793
792 DEBUG18(printk(KERN_DEBUG 794 ql_dbg(ql_dbg_io, vha, 0x3009,
793 "%s(): Setting protection Tags: (BIG) ref tag = 0x%x," 795 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
794 " app tag = 0x%x, prot SG count %d , cmd lba 0x%x," 796 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
795 " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt, 797 pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
796 (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd))); 798 scsi_get_prot_type(cmd), cmd);
797} 799}
798 800
799 801
@@ -809,6 +811,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
809 uint32_t *cur_dsd = dsd; 811 uint32_t *cur_dsd = dsd;
810 int i; 812 int i;
811 uint16_t used_dsds = tot_dsds; 813 uint16_t used_dsds = tot_dsds;
814 scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
812 815
813 uint8_t *cp; 816 uint8_t *cp;
814 817
@@ -853,9 +856,10 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
853 cur_dsd = (uint32_t *)next_dsd; 856 cur_dsd = (uint32_t *)next_dsd;
854 } 857 }
855 sle_dma = sg_dma_address(sg); 858 sle_dma = sg_dma_address(sg);
856 DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x," 859 ql_dbg(ql_dbg_io, vha, 0x300a,
857 " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma), 860 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
858 MSD(sle_dma), sg_dma_len(sg))); 861 cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
862 sp->cmd);
859 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 863 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
860 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 864 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
861 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 865 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
@@ -863,8 +867,8 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
863 867
864 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 868 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
865 cp = page_address(sg_page(sg)) + sg->offset; 869 cp = page_address(sg_page(sg)) + sg->offset;
866 DEBUG18(printk("%s(): User Data buffer= %p:\n", 870 ql_dbg(ql_dbg_io, vha, 0x300b,
867 __func__ , cp)); 871 "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
868 } 872 }
869 } 873 }
870 /* Null termination */ 874 /* Null termination */
@@ -888,7 +892,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
888 struct scsi_cmnd *cmd; 892 struct scsi_cmnd *cmd;
889 uint32_t *cur_dsd = dsd; 893 uint32_t *cur_dsd = dsd;
890 uint16_t used_dsds = tot_dsds; 894 uint16_t used_dsds = tot_dsds;
891 895 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
892 uint8_t *cp; 896 uint8_t *cp;
893 897
894 898
@@ -935,10 +939,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
935 } 939 }
936 sle_dma = sg_dma_address(sg); 940 sle_dma = sg_dma_address(sg);
937 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 941 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
938 DEBUG18(printk(KERN_DEBUG 942 ql_dbg(ql_dbg_io, vha, 0x3027,
939 "%s(): %p, sg entry %d - addr =0x%x" 943 "%s(): %p, sg_entry %d - "
940 "0x%x, len =%d\n", __func__ , cur_dsd, i, 944 "addr=0x%x0x%x, len=%d.\n",
941 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg))); 945 __func__, cur_dsd, i,
946 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
942 } 947 }
943 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 948 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
944 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 949 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -946,8 +951,9 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
946 951
947 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 952 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
948 cp = page_address(sg_page(sg)) + sg->offset; 953 cp = page_address(sg_page(sg)) + sg->offset;
949 DEBUG18(printk("%s(): Protection Data buffer = %p:\n", 954 ql_dbg(ql_dbg_io, vha, 0x3028,
950 __func__ , cp)); 955 "%s(): Protection Data buffer = %p.\n", __func__,
956 cp);
951 } 957 }
952 avail_dsds--; 958 avail_dsds--;
953 } 959 }
@@ -996,22 +1002,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
996 *((uint32_t *)(&cmd_pkt->entry_type)) = 1002 *((uint32_t *)(&cmd_pkt->entry_type)) =
997 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2); 1003 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
998 1004
1005 vha = sp->fcport->vha;
1006 ha = vha->hw;
1007
999 /* No data transfer */ 1008 /* No data transfer */
1000 data_bytes = scsi_bufflen(cmd); 1009 data_bytes = scsi_bufflen(cmd);
1001 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1010 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1002 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1003 __func__, data_bytes));
1004 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1011 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1005 return QLA_SUCCESS; 1012 return QLA_SUCCESS;
1006 } 1013 }
1007 1014
1008 vha = sp->fcport->vha;
1009 ha = vha->hw;
1010
1011 DEBUG18(printk(KERN_DEBUG
1012 "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
1013 vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
1014
1015 cmd_pkt->vp_index = sp->fcport->vp_idx; 1015 cmd_pkt->vp_index = sp->fcport->vp_idx;
1016 1016
1017 /* Set transfer direction */ 1017 /* Set transfer direction */
@@ -1056,8 +1056,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1056 1056
1057 /* Determine SCSI command length -- align to 4 byte boundary */ 1057 /* Determine SCSI command length -- align to 4 byte boundary */
1058 if (cmd->cmd_len > 16) { 1058 if (cmd->cmd_len > 16) {
1059 DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
1060 __func__));
1061 additional_fcpcdb_len = cmd->cmd_len - 16; 1059 additional_fcpcdb_len = cmd->cmd_len - 16;
1062 if ((cmd->cmd_len % 4) != 0) { 1060 if ((cmd->cmd_len % 4) != 0) {
1063 /* SCSI cmd > 16 bytes must be multiple of 4 */ 1061 /* SCSI cmd > 16 bytes must be multiple of 4 */
@@ -1108,11 +1106,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1108 1106
1109 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1107 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1110 1108
1111 DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
1112 "entries %d, data bytes %d, Protection entries %d\n",
1113 __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
1114 data_bytes, tot_prot_dsds));
1115
1116 /* Compute dif len and adjust data len to incude protection */ 1109 /* Compute dif len and adjust data len to incude protection */
1117 total_bytes = data_bytes; 1110 total_bytes = data_bytes;
1118 dif_bytes = 0; 1111 dif_bytes = 0;
@@ -1150,14 +1143,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1150 additional_fcpcdb_len); 1143 additional_fcpcdb_len);
1151 *fcp_dl = htonl(total_bytes); 1144 *fcp_dl = htonl(total_bytes);
1152 1145
1153 DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
1154 " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
1155 vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
1156 crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
1157
1158 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1146 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1159 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1160 __func__, data_bytes));
1161 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1147 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1162 return QLA_SUCCESS; 1148 return QLA_SUCCESS;
1163 } 1149 }
@@ -1182,8 +1168,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1182 return QLA_SUCCESS; 1168 return QLA_SUCCESS;
1183 1169
1184crc_queuing_error: 1170crc_queuing_error:
1185 DEBUG18(qla_printk(KERN_INFO, ha,
1186 "CMD sent FAILED crc_q error:sp = %p\n", sp));
1187 /* Cleanup will be performed by the caller */ 1171 /* Cleanup will be performed by the caller */
1188 1172
1189 return QLA_FUNCTION_FAILED; 1173 return QLA_FUNCTION_FAILED;
@@ -1225,8 +1209,8 @@ qla24xx_start_scsi(srb_t *sp)
1225 1209
1226 /* Send marker if required */ 1210 /* Send marker if required */
1227 if (vha->marker_needed != 0) { 1211 if (vha->marker_needed != 0) {
1228 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) 1212 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1229 != QLA_SUCCESS) 1213 QLA_SUCCESS)
1230 return QLA_FUNCTION_FAILED; 1214 return QLA_FUNCTION_FAILED;
1231 vha->marker_needed = 0; 1215 vha->marker_needed = 0;
1232 } 1216 }
@@ -1243,8 +1227,9 @@ qla24xx_start_scsi(srb_t *sp)
1243 if (!req->outstanding_cmds[handle]) 1227 if (!req->outstanding_cmds[handle])
1244 break; 1228 break;
1245 } 1229 }
1246 if (index == MAX_OUTSTANDING_COMMANDS) 1230 if (index == MAX_OUTSTANDING_COMMANDS) {
1247 goto queuing_error; 1231 goto queuing_error;
1232 }
1248 1233
1249 /* Map the sg table so we have an accurate count of sg entries needed */ 1234 /* Map the sg table so we have an accurate count of sg entries needed */
1250 if (scsi_sg_count(cmd)) { 1235 if (scsi_sg_count(cmd)) {
@@ -1256,8 +1241,7 @@ qla24xx_start_scsi(srb_t *sp)
1256 nseg = 0; 1241 nseg = 0;
1257 1242
1258 tot_dsds = nseg; 1243 tot_dsds = nseg;
1259 1244 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1260 req_cnt = qla24xx_calc_iocbs(tot_dsds);
1261 if (req->cnt < (req_cnt + 2)) { 1245 if (req->cnt < (req_cnt + 2)) {
1262 cnt = RD_REG_DWORD_RELAXED(req->req_q_out); 1246 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1263 1247
@@ -1322,7 +1306,6 @@ qla24xx_start_scsi(srb_t *sp)
1322 /* Specify response queue number where completion should happen */ 1306 /* Specify response queue number where completion should happen */
1323 cmd_pkt->entry_status = (uint8_t) rsp->id; 1307 cmd_pkt->entry_status = (uint8_t) rsp->id;
1324 wmb(); 1308 wmb();
1325
1326 /* Adjust ring index. */ 1309 /* Adjust ring index. */
1327 req->ring_index++; 1310 req->ring_index++;
1328 if (req->ring_index == req->length) { 1311 if (req->ring_index == req->length) {
@@ -1534,9 +1517,6 @@ queuing_error:
1534 /* Cleanup will be performed by the caller (queuecommand) */ 1517 /* Cleanup will be performed by the caller (queuecommand) */
1535 1518
1536 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1519 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1537
1538 DEBUG18(qla_printk(KERN_INFO, ha,
1539 "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
1540 return QLA_FUNCTION_FAILED; 1520 return QLA_FUNCTION_FAILED;
1541} 1521}
1542 1522
@@ -1581,8 +1561,11 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1581 if (!req->outstanding_cmds[handle]) 1561 if (!req->outstanding_cmds[handle])
1582 break; 1562 break;
1583 } 1563 }
1584 if (index == MAX_OUTSTANDING_COMMANDS) 1564 if (index == MAX_OUTSTANDING_COMMANDS) {
1565 ql_log(ql_log_warn, vha, 0x700b,
1566 "No room on oustanding cmd array.\n");
1585 goto queuing_error; 1567 goto queuing_error;
1568 }
1586 1569
1587 /* Prep command array. */ 1570 /* Prep command array. */
1588 req->current_outstanding_cmd = handle; 1571 req->current_outstanding_cmd = handle;
@@ -1999,8 +1982,11 @@ qla2x00_start_sp(srb_t *sp)
1999 rval = QLA_FUNCTION_FAILED; 1982 rval = QLA_FUNCTION_FAILED;
2000 spin_lock_irqsave(&ha->hardware_lock, flags); 1983 spin_lock_irqsave(&ha->hardware_lock, flags);
2001 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); 1984 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2002 if (!pkt) 1985 if (!pkt) {
1986 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
1987 "qla2x00_alloc_iocbs failed.\n");
2003 goto done; 1988 goto done;
1989 }
2004 1990
2005 rval = QLA_SUCCESS; 1991 rval = QLA_SUCCESS;
2006 switch (ctx->type) { 1992 switch (ctx->type) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ae8e298746ba..b16b7725dee0 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -45,7 +45,7 @@ qla2100_intr_handler(int irq, void *dev_id)
45 rsp = (struct rsp_que *) dev_id; 45 rsp = (struct rsp_que *) dev_id;
46 if (!rsp) { 46 if (!rsp) {
47 printk(KERN_INFO 47 printk(KERN_INFO
48 "%s(): NULL response queue pointer\n", __func__); 48 "%s(): NULL response queue pointer.\n", __func__);
49 return (IRQ_NONE); 49 return (IRQ_NONE);
50 } 50 }
51 51
@@ -91,9 +91,9 @@ qla2100_intr_handler(int irq, void *dev_id)
91 qla2x00_async_event(vha, rsp, mb); 91 qla2x00_async_event(vha, rsp, mb);
92 } else { 92 } else {
93 /*EMPTY*/ 93 /*EMPTY*/
94 DEBUG2(printk("scsi(%ld): Unrecognized " 94 ql_dbg(ql_dbg_async, vha, 0x5025,
95 "interrupt type (%d).\n", 95 "Unrecognized interrupt type (%d).\n",
96 vha->host_no, mb[0])); 96 mb[0]);
97 } 97 }
98 /* Release mailbox registers. */ 98 /* Release mailbox registers. */
99 WRT_REG_WORD(&reg->semaphore, 0); 99 WRT_REG_WORD(&reg->semaphore, 0);
@@ -142,7 +142,7 @@ qla2300_intr_handler(int irq, void *dev_id)
142 rsp = (struct rsp_que *) dev_id; 142 rsp = (struct rsp_que *) dev_id;
143 if (!rsp) { 143 if (!rsp) {
144 printk(KERN_INFO 144 printk(KERN_INFO
145 "%s(): NULL response queue pointer\n", __func__); 145 "%s(): NULL response queue pointer.\n", __func__);
146 return (IRQ_NONE); 146 return (IRQ_NONE);
147 } 147 }
148 148
@@ -160,11 +160,13 @@ qla2300_intr_handler(int irq, void *dev_id)
160 160
161 hccr = RD_REG_WORD(&reg->hccr); 161 hccr = RD_REG_WORD(&reg->hccr);
162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
163 qla_printk(KERN_INFO, ha, "Parity error -- " 163 ql_log(ql_log_warn, vha, 0x5026,
164 "HCCR=%x, Dumping firmware!\n", hccr); 164 "Parity error -- HCCR=%x, Dumping "
165 "firmware.\n", hccr);
165 else 166 else
166 qla_printk(KERN_INFO, ha, "RISC paused -- " 167 ql_log(ql_log_warn, vha, 0x5027,
167 "HCCR=%x, Dumping firmware!\n", hccr); 168 "RISC paused -- HCCR=%x, Dumping "
169 "firmware.\n", hccr);
168 170
169 /* 171 /*
170 * Issue a "HARD" reset in order for the RISC 172 * Issue a "HARD" reset in order for the RISC
@@ -213,9 +215,8 @@ qla2300_intr_handler(int irq, void *dev_id)
213 qla2x00_async_event(vha, rsp, mb); 215 qla2x00_async_event(vha, rsp, mb);
214 break; 216 break;
215 default: 217 default:
216 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 218 ql_dbg(ql_dbg_async, vha, 0x5028,
217 "(%d).\n", 219 "Unrecognized interrupt type (%d).\n", stat & 0xff);
218 vha->host_no, stat & 0xff));
219 break; 220 break;
220 } 221 }
221 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 222 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -262,11 +263,11 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
262 } 263 }
263 264
264 if (ha->mcp) { 265 if (ha->mcp) {
265 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 266 ql_dbg(ql_dbg_async, vha, 0x5000,
266 __func__, vha->host_no, ha->mcp->mb[0])); 267 "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
267 } else { 268 } else {
268 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 269 ql_dbg(ql_dbg_async, vha, 0x5001,
269 __func__, vha->host_no)); 270 "MBX pointer ERROR.\n");
270 } 271 }
271} 272}
272 273
@@ -285,22 +286,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
285 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 286 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
286 mb[cnt] = RD_REG_WORD(wptr); 287 mb[cnt] = RD_REG_WORD(wptr);
287 288
288 DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " 289 ql_dbg(ql_dbg_async, vha, 0x5021,
289 "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no, 290 "Inter-Driver Commucation %s -- "
290 event[aen & 0xff], 291 "%04x %04x %04x %04x %04x %04x %04x.\n",
291 mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6])); 292 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
293 mb[4], mb[5], mb[6]);
292 294
293 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 295 /* Acknowledgement needed? [Notify && non-zero timeout]. */
294 timeout = (descr >> 8) & 0xf; 296 timeout = (descr >> 8) & 0xf;
295 if (aen != MBA_IDC_NOTIFY || !timeout) 297 if (aen != MBA_IDC_NOTIFY || !timeout)
296 return; 298 return;
297 299
298 DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " 300 ql_dbg(ql_dbg_async, vha, 0x5022,
299 "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout)); 301 "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
302 vha->host_no, event[aen & 0xff], timeout);
300 303
301 rval = qla2x00_post_idc_ack_work(vha, mb); 304 rval = qla2x00_post_idc_ack_work(vha, mb);
302 if (rval != QLA_SUCCESS) 305 if (rval != QLA_SUCCESS)
303 qla_printk(KERN_WARNING, vha->hw, 306 ql_log(ql_log_warn, vha, 0x5023,
304 "IDC failed to post ACK.\n"); 307 "IDC failed to post ACK.\n");
305} 308}
306 309
@@ -393,15 +396,15 @@ skip_rio:
393 break; 396 break;
394 397
395 case MBA_RESET: /* Reset */ 398 case MBA_RESET: /* Reset */
396 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", 399 ql_dbg(ql_dbg_async, vha, 0x5002,
397 vha->host_no)); 400 "Asynchronous RESET.\n");
398 401
399 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 402 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
400 break; 403 break;
401 404
402 case MBA_SYSTEM_ERR: /* System Error */ 405 case MBA_SYSTEM_ERR: /* System Error */
403 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0; 406 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
404 qla_printk(KERN_INFO, ha, 407 ql_log(ql_log_warn, vha, 0x5003,
405 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 408 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
406 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 409 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
407 410
@@ -409,7 +412,7 @@ skip_rio:
409 412
410 if (IS_FWI2_CAPABLE(ha)) { 413 if (IS_FWI2_CAPABLE(ha)) {
411 if (mb[1] == 0 && mb[2] == 0) { 414 if (mb[1] == 0 && mb[2] == 0) {
412 qla_printk(KERN_ERR, ha, 415 ql_log(ql_log_fatal, vha, 0x5004,
413 "Unrecoverable Hardware Error: adapter " 416 "Unrecoverable Hardware Error: adapter "
414 "marked OFFLINE!\n"); 417 "marked OFFLINE!\n");
415 vha->flags.online = 0; 418 vha->flags.online = 0;
@@ -422,7 +425,7 @@ skip_rio:
422 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 425 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
423 } 426 }
424 } else if (mb[1] == 0) { 427 } else if (mb[1] == 0) {
425 qla_printk(KERN_INFO, ha, 428 ql_log(ql_log_fatal, vha, 0x5005,
426 "Unrecoverable Hardware Error: adapter marked " 429 "Unrecoverable Hardware Error: adapter marked "
427 "OFFLINE!\n"); 430 "OFFLINE!\n");
428 vha->flags.online = 0; 431 vha->flags.online = 0;
@@ -431,31 +434,27 @@ skip_rio:
431 break; 434 break;
432 435
433 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 436 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
434 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n", 437 ql_log(ql_log_warn, vha, 0x5006,
435 vha->host_no, mb[1])); 438 "ISP Request Transfer Error (%x).\n", mb[1]);
436 qla_printk(KERN_WARNING, ha,
437 "ISP Request Transfer Error (%x).\n", mb[1]);
438 439
439 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 440 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
440 break; 441 break;
441 442
442 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 443 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
443 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", 444 ql_log(ql_log_warn, vha, 0x5007,
444 vha->host_no)); 445 "ISP Response Transfer Error.\n");
445 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
446 446
447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
448 break; 448 break;
449 449
450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
451 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", 451 ql_dbg(ql_dbg_async, vha, 0x5008,
452 vha->host_no)); 452 "Asynchronous WAKEUP_THRES.\n");
453 break; 453 break;
454 454
455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
456 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no, 456 ql_log(ql_log_info, vha, 0x5009,
457 mb[1])); 457 "LIP occurred (%x).\n", mb[1]);
458 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
459 458
460 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
461 atomic_set(&vha->loop_state, LOOP_DOWN); 460 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -488,10 +487,8 @@ skip_rio:
488 ha->link_data_rate = mb[1]; 487 ha->link_data_rate = mb[1];
489 } 488 }
490 489
491 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", 490 ql_log(ql_log_info, vha, 0x500a,
492 vha->host_no, link_speed)); 491 "LOOP UP detected (%s Gbps).\n", link_speed);
493 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
494 link_speed);
495 492
496 vha->flags.management_server_logged_in = 0; 493 vha->flags.management_server_logged_in = 0;
497 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 494 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -500,12 +497,9 @@ skip_rio:
500 case MBA_LOOP_DOWN: /* Loop Down Event */ 497 case MBA_LOOP_DOWN: /* Loop Down Event */
501 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0; 498 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
502 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; 499 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
503 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 500 ql_log(ql_log_info, vha, 0x500b,
504 "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], 501 "LOOP DOWN detected (%x %x %x %x).\n",
505 mbx)); 502 mb[1], mb[2], mb[3], mbx);
506 qla_printk(KERN_INFO, ha,
507 "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
508 mbx);
509 503
510 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 504 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
511 atomic_set(&vha->loop_state, LOOP_DOWN); 505 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -525,9 +519,7 @@ skip_rio:
525 break; 519 break;
526 520
527 case MBA_LIP_RESET: /* LIP reset occurred */ 521 case MBA_LIP_RESET: /* LIP reset occurred */
528 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 522 ql_log(ql_log_info, vha, 0x500c,
529 vha->host_no, mb[1]));
530 qla_printk(KERN_INFO, ha,
531 "LIP reset occurred (%x).\n", mb[1]); 523 "LIP reset occurred (%x).\n", mb[1]);
532 524
533 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 525 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -554,14 +546,15 @@ skip_rio:
554 break; 546 break;
555 547
556 if (IS_QLA8XXX_TYPE(ha)) { 548 if (IS_QLA8XXX_TYPE(ha)) {
557 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x " 549 ql_dbg(ql_dbg_async, vha, 0x500d,
558 "%04x\n", vha->host_no, mb[1], mb[2], mb[3])); 550 "DCBX Completed -- %04x %04x %04x.\n",
551 mb[1], mb[2], mb[3]);
559 if (ha->notify_dcbx_comp) 552 if (ha->notify_dcbx_comp)
560 complete(&ha->dcbx_comp); 553 complete(&ha->dcbx_comp);
561 554
562 } else 555 } else
563 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE " 556 ql_dbg(ql_dbg_async, vha, 0x500e,
564 "received.\n", vha->host_no)); 557 "Asynchronous P2P MODE received.\n");
565 558
566 /* 559 /*
567 * Until there's a transition from loop down to loop up, treat 560 * Until there's a transition from loop down to loop up, treat
@@ -594,10 +587,7 @@ skip_rio:
594 if (IS_QLA2100(ha)) 587 if (IS_QLA2100(ha))
595 break; 588 break;
596 589
597 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " 590 ql_log(ql_log_info, vha, 0x500f,
598 "received.\n",
599 vha->host_no));
600 qla_printk(KERN_INFO, ha,
601 "Configuration change detected: value=%x.\n", mb[1]); 591 "Configuration change detected: value=%x.\n", mb[1]);
602 592
603 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 593 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -640,11 +630,9 @@ skip_rio:
640 630
641 /* Global event -- port logout or port unavailable. */ 631 /* Global event -- port logout or port unavailable. */
642 if (mb[1] == 0xffff && mb[2] == 0x7) { 632 if (mb[1] == 0xffff && mb[2] == 0x7) {
643 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 633 ql_dbg(ql_dbg_async, vha, 0x5010,
644 vha->host_no)); 634 "Port unavailable %04x %04x %04x.\n",
645 DEBUG(printk(KERN_INFO 635 mb[1], mb[2], mb[3]);
646 "scsi(%ld): Port unavailable %04x %04x %04x.\n",
647 vha->host_no, mb[1], mb[2], mb[3]));
648 636
649 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 637 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
650 atomic_set(&vha->loop_state, LOOP_DOWN); 638 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -674,17 +662,15 @@ skip_rio:
674 atomic_set(&vha->loop_down_timer, 0); 662 atomic_set(&vha->loop_down_timer, 0);
675 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 663 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
676 atomic_read(&vha->loop_state) != LOOP_DEAD) { 664 atomic_read(&vha->loop_state) != LOOP_DEAD) {
677 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " 665 ql_dbg(ql_dbg_async, vha, 0x5011,
678 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1], 666 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
679 mb[2], mb[3])); 667 mb[1], mb[2], mb[3]);
680 break; 668 break;
681 } 669 }
682 670
683 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 671 ql_dbg(ql_dbg_async, vha, 0x5012,
684 vha->host_no)); 672 "Port database changed %04x %04x %04x.\n",
685 DEBUG(printk(KERN_INFO 673 mb[1], mb[2], mb[3]);
686 "scsi(%ld): Port database changed %04x %04x %04x.\n",
687 vha->host_no, mb[1], mb[2], mb[3]));
688 674
689 /* 675 /*
690 * Mark all devices as missing so we will login again. 676 * Mark all devices as missing so we will login again.
@@ -707,20 +693,17 @@ skip_rio:
707 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 693 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
708 break; 694 break;
709 695
710 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", 696 ql_dbg(ql_dbg_async, vha, 0x5013,
711 vha->host_no)); 697 "RSCN database changed -- %04x %04x %04x.\n",
712 DEBUG(printk(KERN_INFO 698 mb[1], mb[2], mb[3]);
713 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
714 vha->host_no, mb[1], mb[2], mb[3]));
715 699
716 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 700 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
717 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 701 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
718 | vha->d_id.b.al_pa; 702 | vha->d_id.b.al_pa;
719 if (rscn_entry == host_pid) { 703 if (rscn_entry == host_pid) {
720 DEBUG(printk(KERN_INFO 704 ql_dbg(ql_dbg_async, vha, 0x5014,
721 "scsi(%ld): Ignoring RSCN update to local host " 705 "Ignoring RSCN update to local host "
722 "port ID (%06x)\n", 706 "port ID (%06x).\n", host_pid);
723 vha->host_no, host_pid));
724 break; 707 break;
725 } 708 }
726 709
@@ -747,8 +730,8 @@ skip_rio:
747 730
748 /* case MBA_RIO_RESPONSE: */ 731 /* case MBA_RIO_RESPONSE: */
749 case MBA_ZIO_RESPONSE: 732 case MBA_ZIO_RESPONSE:
750 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n", 733 ql_dbg(ql_dbg_async, vha, 0x5015,
751 vha->host_no)); 734 "[R|Z]IO update completion.\n");
752 735
753 if (IS_FWI2_CAPABLE(ha)) 736 if (IS_FWI2_CAPABLE(ha))
754 qla24xx_process_response_queue(vha, rsp); 737 qla24xx_process_response_queue(vha, rsp);
@@ -757,61 +740,68 @@ skip_rio:
757 break; 740 break;
758 741
759 case MBA_DISCARD_RND_FRAME: 742 case MBA_DISCARD_RND_FRAME:
760 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 743 ql_dbg(ql_dbg_async, vha, 0x5016,
761 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3])); 744 "Discard RND Frame -- %04x %04x %04x.\n",
745 mb[1], mb[2], mb[3]);
762 break; 746 break;
763 747
764 case MBA_TRACE_NOTIFICATION: 748 case MBA_TRACE_NOTIFICATION:
765 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", 749 ql_dbg(ql_dbg_async, vha, 0x5017,
766 vha->host_no, mb[1], mb[2])); 750 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
767 break; 751 break;
768 752
769 case MBA_ISP84XX_ALERT: 753 case MBA_ISP84XX_ALERT:
770 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " 754 ql_dbg(ql_dbg_async, vha, 0x5018,
771 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 755 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
756 mb[1], mb[2], mb[3]);
772 757
773 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 758 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
774 switch (mb[1]) { 759 switch (mb[1]) {
775 case A84_PANIC_RECOVERY: 760 case A84_PANIC_RECOVERY:
776 qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery " 761 ql_log(ql_log_info, vha, 0x5019,
777 "%04x %04x\n", mb[2], mb[3]); 762 "Alert 84XX: panic recovery %04x %04x.\n",
763 mb[2], mb[3]);
778 break; 764 break;
779 case A84_OP_LOGIN_COMPLETE: 765 case A84_OP_LOGIN_COMPLETE:
780 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 766 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
781 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 767 ql_log(ql_log_info, vha, 0x501a,
782 "firmware version %x\n", ha->cs84xx->op_fw_version)); 768 "Alert 84XX: firmware version %x.\n",
769 ha->cs84xx->op_fw_version);
783 break; 770 break;
784 case A84_DIAG_LOGIN_COMPLETE: 771 case A84_DIAG_LOGIN_COMPLETE:
785 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 772 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
786 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 773 ql_log(ql_log_info, vha, 0x501b,
787 "diagnostic firmware version %x\n", 774 "Alert 84XX: diagnostic firmware version %x.\n",
788 ha->cs84xx->diag_fw_version)); 775 ha->cs84xx->diag_fw_version);
789 break; 776 break;
790 case A84_GOLD_LOGIN_COMPLETE: 777 case A84_GOLD_LOGIN_COMPLETE:
791 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 778 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
792 ha->cs84xx->fw_update = 1; 779 ha->cs84xx->fw_update = 1;
793 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold " 780 ql_log(ql_log_info, vha, 0x501c,
794 "firmware version %x\n", 781 "Alert 84XX: gold firmware version %x.\n",
795 ha->cs84xx->gold_fw_version)); 782 ha->cs84xx->gold_fw_version);
796 break; 783 break;
797 default: 784 default:
798 qla_printk(KERN_ERR, ha, 785 ql_log(ql_log_warn, vha, 0x501d,
799 "Alert 84xx: Invalid Alert %04x %04x %04x\n", 786 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
800 mb[1], mb[2], mb[3]); 787 mb[1], mb[2], mb[3]);
801 } 788 }
802 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 789 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
803 break; 790 break;
804 case MBA_DCBX_START: 791 case MBA_DCBX_START:
805 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n", 792 ql_dbg(ql_dbg_async, vha, 0x501e,
806 vha->host_no, mb[1], mb[2], mb[3])); 793 "DCBX Started -- %04x %04x %04x.\n",
794 mb[1], mb[2], mb[3]);
807 break; 795 break;
808 case MBA_DCBX_PARAM_UPDATE: 796 case MBA_DCBX_PARAM_UPDATE:
809 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- " 797 ql_dbg(ql_dbg_async, vha, 0x501f,
810 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 798 "DCBX Parameters Updated -- %04x %04x %04x.\n",
799 mb[1], mb[2], mb[3]);
811 break; 800 break;
812 case MBA_FCF_CONF_ERR: 801 case MBA_FCF_CONF_ERR:
813 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- " 802 ql_dbg(ql_dbg_async, vha, 0x5020,
814 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 803 "FCF Configuration Error -- %04x %04x %04x.\n",
804 mb[1], mb[2], mb[3]);
815 break; 805 break;
816 case MBA_IDC_COMPLETE: 806 case MBA_IDC_COMPLETE:
817 case MBA_IDC_NOTIFY: 807 case MBA_IDC_NOTIFY:
@@ -838,10 +828,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
838 828
839 /* Validate handle. */ 829 /* Validate handle. */
840 if (index >= MAX_OUTSTANDING_COMMANDS) { 830 if (index >= MAX_OUTSTANDING_COMMANDS) {
841 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", 831 ql_log(ql_log_warn, vha, 0x3014,
842 vha->host_no, index)); 832 "Invalid SCSI command index (%x).\n", index);
843 qla_printk(KERN_WARNING, ha,
844 "Invalid SCSI completion handle %d.\n", index);
845 833
846 if (IS_QLA82XX(ha)) 834 if (IS_QLA82XX(ha))
847 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 835 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -859,10 +847,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
859 sp->cmd->result = DID_OK << 16; 847 sp->cmd->result = DID_OK << 16;
860 qla2x00_sp_compl(ha, sp); 848 qla2x00_sp_compl(ha, sp);
861 } else { 849 } else {
862 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" 850 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
863 " handle(0x%x)\n", vha->host_no, req->id, index));
864 qla_printk(KERN_WARNING, ha,
865 "Invalid ISP SCSI completion handle\n");
866 851
867 if (IS_QLA82XX(ha)) 852 if (IS_QLA82XX(ha))
868 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 853 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -882,8 +867,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
882 867
883 index = LSW(pkt->handle); 868 index = LSW(pkt->handle);
884 if (index >= MAX_OUTSTANDING_COMMANDS) { 869 if (index >= MAX_OUTSTANDING_COMMANDS) {
885 qla_printk(KERN_WARNING, ha, 870 ql_log(ql_log_warn, vha, 0x5031,
886 "%s: Invalid completion handle (%x).\n", func, index); 871 "Invalid command index (%x).\n", index);
887 if (IS_QLA82XX(ha)) 872 if (IS_QLA82XX(ha))
888 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 873 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
889 else 874 else
@@ -892,15 +877,13 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
892 } 877 }
893 sp = req->outstanding_cmds[index]; 878 sp = req->outstanding_cmds[index];
894 if (!sp) { 879 if (!sp) {
895 qla_printk(KERN_WARNING, ha, 880 ql_log(ql_log_warn, vha, 0x5032,
896 "%s: Invalid completion handle (%x) -- timed-out.\n", func, 881 "Invalid completion handle (%x) -- timed-out.\n", index);
897 index);
898 return sp; 882 return sp;
899 } 883 }
900 if (sp->handle != index) { 884 if (sp->handle != index) {
901 qla_printk(KERN_WARNING, ha, 885 ql_log(ql_log_warn, vha, 0x5033,
902 "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle, 886 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
903 index);
904 return NULL; 887 return NULL;
905 } 888 }
906 889
@@ -937,17 +920,17 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
937 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 920 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
938 QLA_LOGIO_LOGIN_RETRIED : 0; 921 QLA_LOGIO_LOGIN_RETRIED : 0;
939 if (mbx->entry_status) { 922 if (mbx->entry_status) {
940 DEBUG2(printk(KERN_WARNING 923 ql_dbg(ql_dbg_async, vha, 0x5043,
941 "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x " 924 "Async-%s error entry - portid=%02x%02x%02x "
942 "entry-status=%x status=%x state-flag=%x " 925 "entry-status=%x status=%x state-flag=%x "
943 "status-flags=%x.\n", 926 "status-flags=%x.\n",
944 fcport->vha->host_no, sp->handle, type, 927 type, fcport->d_id.b.domain, fcport->d_id.b.area,
945 fcport->d_id.b.domain, fcport->d_id.b.area,
946 fcport->d_id.b.al_pa, mbx->entry_status, 928 fcport->d_id.b.al_pa, mbx->entry_status,
947 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 929 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
948 le16_to_cpu(mbx->status_flags))); 930 le16_to_cpu(mbx->status_flags));
949 931
950 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx))); 932 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
933 (uint8_t *)mbx, sizeof(*mbx));
951 934
952 goto logio_done; 935 goto logio_done;
953 } 936 }
@@ -957,12 +940,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
957 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 940 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
958 status = 0; 941 status = 0;
959 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 942 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
960 DEBUG2(printk(KERN_DEBUG 943 ql_dbg(ql_dbg_async, vha, 0x5045,
961 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " 944 "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
962 "mbx1=%x.\n", 945 type, fcport->d_id.b.domain, fcport->d_id.b.area,
963 fcport->vha->host_no, sp->handle, type, 946 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
964 fcport->d_id.b.domain, fcport->d_id.b.area,
965 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
966 947
967 data[0] = MBS_COMMAND_COMPLETE; 948 data[0] = MBS_COMMAND_COMPLETE;
968 if (ctx->type == SRB_LOGIN_CMD) { 949 if (ctx->type == SRB_LOGIN_CMD) {
@@ -987,14 +968,14 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
987 break; 968 break;
988 } 969 }
989 970
990 DEBUG2(printk(KERN_WARNING 971 ql_log(ql_log_warn, vha, 0x5046,
991 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x " 972 "Async-%s failed - portid=%02x%02x%02x status=%x "
992 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", 973 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
993 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, 974 type, fcport->d_id.b.domain,
994 fcport->d_id.b.area, fcport->d_id.b.al_pa, status, 975 fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
995 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 976 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
996 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 977 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
997 le16_to_cpu(mbx->mb7))); 978 le16_to_cpu(mbx->mb7));
998 979
999logio_done: 980logio_done:
1000 lio->done(sp); 981 lio->done(sp);
@@ -1025,9 +1006,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1025 type = "ct pass-through"; 1006 type = "ct pass-through";
1026 break; 1007 break;
1027 default: 1008 default:
1028 qla_printk(KERN_WARNING, ha, 1009 ql_log(ql_log_warn, vha, 0x5047,
1029 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, 1010 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1030 sp_bsg->type);
1031 return; 1011 return;
1032 } 1012 }
1033 1013
@@ -1045,20 +1025,20 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1045 bsg_job->reply->reply_payload_rcv_len = 1025 bsg_job->reply->reply_payload_rcv_len =
1046 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1026 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1047 1027
1048 DEBUG2(qla_printk(KERN_WARNING, ha, 1028 ql_log(ql_log_warn, vha, 0x5048,
1049 "scsi(%ld): CT pass-through-%s error " 1029 "CT pass-through-%s error "
1050 "comp_status-status=0x%x total_byte = 0x%x.\n", 1030 "comp_status-status=0x%x total_byte = 0x%x.\n",
1051 vha->host_no, type, comp_status, 1031 type, comp_status,
1052 bsg_job->reply->reply_payload_rcv_len)); 1032 bsg_job->reply->reply_payload_rcv_len);
1053 } else { 1033 } else {
1054 DEBUG2(qla_printk(KERN_WARNING, ha, 1034 ql_log(ql_log_warn, vha, 0x5049,
1055 "scsi(%ld): CT pass-through-%s error " 1035 "CT pass-through-%s error "
1056 "comp_status-status=0x%x.\n", 1036 "comp_status-status=0x%x.\n", type, comp_status);
1057 vha->host_no, type, comp_status));
1058 bsg_job->reply->result = DID_ERROR << 16; 1037 bsg_job->reply->result = DID_ERROR << 16;
1059 bsg_job->reply->reply_payload_rcv_len = 0; 1038 bsg_job->reply->reply_payload_rcv_len = 0;
1060 } 1039 }
1061 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); 1040 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
1041 (uint8_t *)pkt, sizeof(*pkt));
1062 } else { 1042 } else {
1063 bsg_job->reply->result = DID_OK << 16; 1043 bsg_job->reply->result = DID_OK << 16;
1064 bsg_job->reply->reply_payload_rcv_len = 1044 bsg_job->reply->reply_payload_rcv_len =
@@ -1110,9 +1090,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1110 type = "ct pass-through"; 1090 type = "ct pass-through";
1111 break; 1091 break;
1112 default: 1092 default:
1113 qla_printk(KERN_WARNING, ha, 1093 ql_log(ql_log_warn, vha, 0x503e,
1114 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, 1094 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1115 sp_bsg->type);
1116 return; 1095 return;
1117 } 1096 }
1118 1097
@@ -1132,27 +1111,31 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1132 bsg_job->reply->reply_payload_rcv_len = 1111 bsg_job->reply->reply_payload_rcv_len =
1133 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1112 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1134 1113
1135 DEBUG2(qla_printk(KERN_WARNING, ha, 1114 ql_log(ql_log_info, vha, 0x503f,
1136 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " 1115 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1137 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1116 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1138 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2], 1117 type, comp_status, fw_status[1], fw_status[2],
1139 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count))); 1118 le16_to_cpu(((struct els_sts_entry_24xx *)
1119 pkt)->total_byte_count));
1140 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1120 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1141 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1121 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1142 } 1122 }
1143 else { 1123 else {
1144 DEBUG2(qla_printk(KERN_WARNING, ha, 1124 ql_log(ql_log_info, vha, 0x5040,
1145 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " 1125 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1146 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1126 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1147 vha->host_no, sp->handle, type, comp_status, 1127 type, comp_status,
1148 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1), 1128 le16_to_cpu(((struct els_sts_entry_24xx *)
1149 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2))); 1129 pkt)->error_subcode_1),
1130 le16_to_cpu(((struct els_sts_entry_24xx *)
1131 pkt)->error_subcode_2));
1150 bsg_job->reply->result = DID_ERROR << 16; 1132 bsg_job->reply->result = DID_ERROR << 16;
1151 bsg_job->reply->reply_payload_rcv_len = 0; 1133 bsg_job->reply->reply_payload_rcv_len = 0;
1152 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1134 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1153 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1135 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1154 } 1136 }
1155 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); 1137 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
1138 (uint8_t *)pkt, sizeof(*pkt));
1156 } 1139 }
1157 else { 1140 else {
1158 bsg_job->reply->result = DID_OK << 16; 1141 bsg_job->reply->result = DID_OK << 16;
@@ -1201,25 +1184,24 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1201 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1184 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1202 QLA_LOGIO_LOGIN_RETRIED : 0; 1185 QLA_LOGIO_LOGIN_RETRIED : 0;
1203 if (logio->entry_status) { 1186 if (logio->entry_status) {
1204 DEBUG2(printk(KERN_WARNING 1187 ql_log(ql_log_warn, vha, 0x5034,
1205 "scsi(%ld:%x): Async-%s error entry - " 1188 "Async-%s error entry - "
1206 "portid=%02x%02x%02x entry-status=%x.\n", 1189 "portid=%02x%02x%02x entry-status=%x.\n",
1207 fcport->vha->host_no, sp->handle, type, 1190 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1208 fcport->d_id.b.domain, fcport->d_id.b.area, 1191 fcport->d_id.b.al_pa, logio->entry_status);
1209 fcport->d_id.b.al_pa, logio->entry_status)); 1192 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
1210 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio))); 1193 (uint8_t *)logio, sizeof(*logio));
1211 1194
1212 goto logio_done; 1195 goto logio_done;
1213 } 1196 }
1214 1197
1215 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1198 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1216 DEBUG2(printk(KERN_DEBUG 1199 ql_dbg(ql_dbg_async, vha, 0x5036,
1217 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " 1200 "Async-%s complete - portid=%02x%02x%02x "
1218 "iop0=%x.\n", 1201 "iop0=%x.\n",
1219 fcport->vha->host_no, sp->handle, type, 1202 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1220 fcport->d_id.b.domain, fcport->d_id.b.area,
1221 fcport->d_id.b.al_pa, 1203 fcport->d_id.b.al_pa,
1222 le32_to_cpu(logio->io_parameter[0]))); 1204 le32_to_cpu(logio->io_parameter[0]));
1223 1205
1224 data[0] = MBS_COMMAND_COMPLETE; 1206 data[0] = MBS_COMMAND_COMPLETE;
1225 if (ctx->type != SRB_LOGIN_CMD) 1207 if (ctx->type != SRB_LOGIN_CMD)
@@ -1256,14 +1238,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1256 break; 1238 break;
1257 } 1239 }
1258 1240
1259 DEBUG2(printk(KERN_WARNING 1241 ql_dbg(ql_dbg_async, vha, 0x5037,
1260 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x " 1242 "Async-%s failed - portid=%02x%02x%02x comp=%x "
1261 "iop0=%x iop1=%x.\n", 1243 "iop0=%x iop1=%x.\n",
1262 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, 1244 type, fcport->d_id.b.domain,
1263 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1245 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1264 le16_to_cpu(logio->comp_status), 1246 le16_to_cpu(logio->comp_status),
1265 le32_to_cpu(logio->io_parameter[0]), 1247 le32_to_cpu(logio->io_parameter[0]),
1266 le32_to_cpu(logio->io_parameter[1]))); 1248 le32_to_cpu(logio->io_parameter[1]));
1267 1249
1268logio_done: 1250logio_done:
1269 lio->done(sp); 1251 lio->done(sp);
@@ -1292,38 +1274,34 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1292 fcport = sp->fcport; 1274 fcport = sp->fcport;
1293 1275
1294 if (sts->entry_status) { 1276 if (sts->entry_status) {
1295 DEBUG2(printk(KERN_WARNING 1277 ql_log(ql_log_warn, vha, 0x5038,
1296 "scsi(%ld:%x): Async-%s error - entry-status(%x).\n", 1278 "Async-%s error - entry-status(%x).\n",
1297 fcport->vha->host_no, sp->handle, type, 1279 type, sts->entry_status);
1298 sts->entry_status));
1299 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1280 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1300 DEBUG2(printk(KERN_WARNING 1281 ql_log(ql_log_warn, vha, 0x5039,
1301 "scsi(%ld:%x): Async-%s error - completion status(%x).\n", 1282 "Async-%s error - completion status(%x).\n",
1302 fcport->vha->host_no, sp->handle, type, 1283 type, sts->comp_status);
1303 sts->comp_status));
1304 } else if (!(le16_to_cpu(sts->scsi_status) & 1284 } else if (!(le16_to_cpu(sts->scsi_status) &
1305 SS_RESPONSE_INFO_LEN_VALID)) { 1285 SS_RESPONSE_INFO_LEN_VALID)) {
1306 DEBUG2(printk(KERN_WARNING 1286 ql_log(ql_log_warn, vha, 0x503a,
1307 "scsi(%ld:%x): Async-%s error - no response info(%x).\n", 1287 "Async-%s error - no response info(%x).\n",
1308 fcport->vha->host_no, sp->handle, type, 1288 type, sts->scsi_status);
1309 sts->scsi_status));
1310 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1289 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1311 DEBUG2(printk(KERN_WARNING 1290 ql_log(ql_log_warn, vha, 0x503b,
1312 "scsi(%ld:%x): Async-%s error - not enough response(%d).\n", 1291 "Async-%s error - not enough response(%d).\n",
1313 fcport->vha->host_no, sp->handle, type, 1292 type, sts->rsp_data_len);
1314 sts->rsp_data_len));
1315 } else if (sts->data[3]) { 1293 } else if (sts->data[3]) {
1316 DEBUG2(printk(KERN_WARNING 1294 ql_log(ql_log_warn, vha, 0x503c,
1317 "scsi(%ld:%x): Async-%s error - response(%x).\n", 1295 "Async-%s error - response(%x).\n",
1318 fcport->vha->host_no, sp->handle, type, 1296 type, sts->data[3]);
1319 sts->data[3]));
1320 } else { 1297 } else {
1321 error = 0; 1298 error = 0;
1322 } 1299 }
1323 1300
1324 if (error) { 1301 if (error) {
1325 iocb->u.tmf.data = error; 1302 iocb->u.tmf.data = error;
1326 DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts))); 1303 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1304 (uint8_t *)sts, sizeof(*sts));
1327 } 1305 }
1328 1306
1329 iocb->done(sp); 1307 iocb->done(sp);
@@ -1360,8 +1338,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1360 } 1338 }
1361 1339
1362 if (pkt->entry_status != 0) { 1340 if (pkt->entry_status != 0) {
1363 DEBUG3(printk(KERN_INFO 1341 ql_log(ql_log_warn, vha, 0x5035,
1364 "scsi(%ld): Process error entry.\n", vha->host_no)); 1342 "Process error entry.\n");
1365 1343
1366 qla2x00_error_entry(vha, rsp, pkt); 1344 qla2x00_error_entry(vha, rsp, pkt);
1367 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1345 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1399,10 +1377,10 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1399 break; 1377 break;
1400 default: 1378 default:
1401 /* Type Not Supported. */ 1379 /* Type Not Supported. */
1402 DEBUG4(printk(KERN_WARNING 1380 ql_log(ql_log_warn, vha, 0x504a,
1403 "scsi(%ld): Received unknown response pkt type %x " 1381 "Received unknown response pkt type %x "
1404 "entry status=%x.\n", 1382 "entry status=%x.\n",
1405 vha->host_no, pkt->entry_type, pkt->entry_status)); 1383 pkt->entry_type, pkt->entry_status);
1406 break; 1384 break;
1407 } 1385 }
1408 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1386 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1418,6 +1396,7 @@ static inline void
1418qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1396qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1419 uint32_t sense_len, struct rsp_que *rsp) 1397 uint32_t sense_len, struct rsp_que *rsp)
1420{ 1398{
1399 struct scsi_qla_host *vha = sp->fcport->vha;
1421 struct scsi_cmnd *cp = sp->cmd; 1400 struct scsi_cmnd *cp = sp->cmd;
1422 1401
1423 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1402 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
@@ -1435,11 +1414,13 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1435 if (sp->request_sense_length != 0) 1414 if (sp->request_sense_length != 0)
1436 rsp->status_srb = sp; 1415 rsp->status_srb = sp;
1437 1416
1438 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 1417 ql_dbg(ql_dbg_io, vha, 0x301c,
1439 "cmd=%p\n", __func__, sp->fcport->vha->host_no, 1418 "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
1440 cp->device->channel, cp->device->id, cp->device->lun, cp)); 1419 sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
1420 cp->device->lun, cp);
1441 if (sense_len) 1421 if (sense_len)
1442 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); 1422 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1423 cp->sense_buffer, sense_len);
1443} 1424}
1444 1425
1445struct scsi_dif_tuple { 1426struct scsi_dif_tuple {
@@ -1457,6 +1438,7 @@ struct scsi_dif_tuple {
1457static inline void 1438static inline void
1458qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1439qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1459{ 1440{
1441 struct scsi_qla_host *vha = sp->fcport->vha;
1460 struct scsi_cmnd *cmd = sp->cmd; 1442 struct scsi_cmnd *cmd = sp->cmd;
1461 struct scsi_dif_tuple *ep = 1443 struct scsi_dif_tuple *ep =
1462 (struct scsi_dif_tuple *)&sts24->data[20]; 1444 (struct scsi_dif_tuple *)&sts24->data[20];
@@ -1473,15 +1455,15 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1473 e_guard = be16_to_cpu(ep->guard); 1455 e_guard = be16_to_cpu(ep->guard);
1474 a_guard = be16_to_cpu(ap->guard); 1456 a_guard = be16_to_cpu(ap->guard);
1475 1457
1476 DEBUG18(printk(KERN_DEBUG 1458 ql_dbg(ql_dbg_io, vha, 0x3023,
1477 "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24)); 1459 "iocb(s) %p Returned STATUS.\n", sts24);
1478 1460
1479 DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1461 ql_dbg(ql_dbg_io, vha, 0x3024,
1462 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1480 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1463 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1481 " tag=0x%x, act guard=0x%x, exp guard=0x%x\n", 1464 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1482 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1465 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1483 a_app_tag, e_app_tag, a_guard, e_guard)); 1466 a_app_tag, e_app_tag, a_guard, e_guard);
1484
1485 1467
1486 /* check guard */ 1468 /* check guard */
1487 if (e_guard != a_guard) { 1469 if (e_guard != a_guard) {
@@ -1569,9 +1551,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1569 sp = NULL; 1551 sp = NULL;
1570 1552
1571 if (sp == NULL) { 1553 if (sp == NULL) {
1572 qla_printk(KERN_WARNING, ha, 1554 ql_log(ql_log_warn, vha, 0x3017,
1573 "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no, 1555 "Invalid status handle (0x%x).\n", sts->handle);
1574 sts->handle);
1575 1556
1576 if (IS_QLA82XX(ha)) 1557 if (IS_QLA82XX(ha))
1577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1558 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1582,9 +1563,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1582 } 1563 }
1583 cp = sp->cmd; 1564 cp = sp->cmd;
1584 if (cp == NULL) { 1565 if (cp == NULL) {
1585 qla_printk(KERN_WARNING, ha, 1566 ql_log(ql_log_warn, vha, 0x3018,
1586 "scsi(%ld): Command already returned (0x%x/%p).\n", 1567 "Command already returned (0x%x/%p).\n",
1587 vha->host_no, sts->handle, sp); 1568 sts->handle, sp);
1588 1569
1589 return; 1570 return;
1590 } 1571 }
@@ -1629,10 +1610,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1629 par_sense_len -= rsp_info_len; 1610 par_sense_len -= rsp_info_len;
1630 } 1611 }
1631 if (rsp_info_len > 3 && rsp_info[3]) { 1612 if (rsp_info_len > 3 && rsp_info[3]) {
1632 DEBUG2(qla_printk(KERN_INFO, ha, 1613 ql_log(ql_log_warn, vha, 0x3019,
1633 "scsi(%ld:%d:%d): FCP I/O protocol failure " 1614 "FCP I/O protocol failure (0x%x/0x%x).\n",
1634 "(0x%x/0x%x).\n", vha->host_no, cp->device->id, 1615 rsp_info_len, rsp_info[3]);
1635 cp->device->lun, rsp_info_len, rsp_info[3]));
1636 1616
1637 cp->result = DID_BUS_BUSY << 16; 1617 cp->result = DID_BUS_BUSY << 16;
1638 goto out; 1618 goto out;
@@ -1661,11 +1641,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1661 if (!lscsi_status && 1641 if (!lscsi_status &&
1662 ((unsigned)(scsi_bufflen(cp) - resid) < 1642 ((unsigned)(scsi_bufflen(cp) - resid) <
1663 cp->underflow)) { 1643 cp->underflow)) {
1664 qla_printk(KERN_INFO, ha, 1644 ql_log(ql_log_warn, vha, 0x301a,
1665 "scsi(%ld:%d:%d): Mid-layer underflow " 1645 "Mid-layer underflow "
1666 "detected (0x%x of 0x%x bytes).\n", 1646 "detected (0x%x of 0x%x bytes).\n",
1667 vha->host_no, cp->device->id, 1647 resid, scsi_bufflen(cp));
1668 cp->device->lun, resid, scsi_bufflen(cp));
1669 1648
1670 cp->result = DID_ERROR << 16; 1649 cp->result = DID_ERROR << 16;
1671 break; 1650 break;
@@ -1674,9 +1653,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1674 cp->result = DID_OK << 16 | lscsi_status; 1653 cp->result = DID_OK << 16 | lscsi_status;
1675 1654
1676 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1655 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1677 DEBUG2(qla_printk(KERN_INFO, ha, 1656 ql_log(ql_log_warn, vha, 0x301b,
1678 "scsi(%ld:%d:%d) QUEUE FULL detected.\n", 1657 "QUEUE FULL detected.\n");
1679 vha->host_no, cp->device->id, cp->device->lun));
1680 break; 1658 break;
1681 } 1659 }
1682 logit = 0; 1660 logit = 0;
@@ -1697,11 +1675,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1697 scsi_set_resid(cp, resid); 1675 scsi_set_resid(cp, resid);
1698 if (scsi_status & SS_RESIDUAL_UNDER) { 1676 if (scsi_status & SS_RESIDUAL_UNDER) {
1699 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1677 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1700 DEBUG2(qla_printk(KERN_INFO, ha, 1678 ql_log(ql_log_warn, vha, 0x301d,
1701 "scsi(%ld:%d:%d) Dropped frame(s) detected " 1679 "Dropped frame(s) detected "
1702 "(0x%x of 0x%x bytes).\n", vha->host_no, 1680 "(0x%x of 0x%x bytes).\n",
1703 cp->device->id, cp->device->lun, resid, 1681 resid, scsi_bufflen(cp));
1704 scsi_bufflen(cp)));
1705 1682
1706 cp->result = DID_ERROR << 16 | lscsi_status; 1683 cp->result = DID_ERROR << 16 | lscsi_status;
1707 break; 1684 break;
@@ -1710,20 +1687,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1710 if (!lscsi_status && 1687 if (!lscsi_status &&
1711 ((unsigned)(scsi_bufflen(cp) - resid) < 1688 ((unsigned)(scsi_bufflen(cp) - resid) <
1712 cp->underflow)) { 1689 cp->underflow)) {
1713 qla_printk(KERN_INFO, ha, 1690 ql_log(ql_log_warn, vha, 0x301e,
1714 "scsi(%ld:%d:%d): Mid-layer underflow " 1691 "Mid-layer underflow "
1715 "detected (0x%x of 0x%x bytes).\n", 1692 "detected (0x%x of 0x%x bytes).\n",
1716 vha->host_no, cp->device->id, 1693 resid, scsi_bufflen(cp));
1717 cp->device->lun, resid, scsi_bufflen(cp));
1718 1694
1719 cp->result = DID_ERROR << 16; 1695 cp->result = DID_ERROR << 16;
1720 break; 1696 break;
1721 } 1697 }
1722 } else { 1698 } else {
1723 DEBUG2(qla_printk(KERN_INFO, ha, 1699 ql_log(ql_log_warn, vha, 0x301f,
1724 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " 1700 "Dropped frame(s) detected (0x%x "
1725 "of 0x%x bytes).\n", vha->host_no, cp->device->id, 1701 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
1726 cp->device->lun, resid, scsi_bufflen(cp)));
1727 1702
1728 cp->result = DID_ERROR << 16 | lscsi_status; 1703 cp->result = DID_ERROR << 16 | lscsi_status;
1729 goto check_scsi_status; 1704 goto check_scsi_status;
@@ -1739,10 +1714,8 @@ check_scsi_status:
1739 */ 1714 */
1740 if (lscsi_status != 0) { 1715 if (lscsi_status != 0) {
1741 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1716 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1742 DEBUG2(qla_printk(KERN_INFO, ha, 1717 ql_log(ql_log_warn, vha, 0x3020,
1743 "scsi(%ld:%d:%d) QUEUE FULL detected.\n", 1718 "QUEUE FULL detected.\n");
1744 vha->host_no, cp->device->id,
1745 cp->device->lun));
1746 logit = 1; 1719 logit = 1;
1747 break; 1720 break;
1748 } 1721 }
@@ -1781,10 +1754,9 @@ check_scsi_status:
1781 break; 1754 break;
1782 } 1755 }
1783 1756
1784 DEBUG2(qla_printk(KERN_INFO, ha, 1757 ql_dbg(ql_dbg_io, vha, 0x3021,
1785 "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n", 1758 "Port down status: port-state=0x%x.\n",
1786 vha->host_no, cp->device->id, cp->device->lun, 1759 atomic_read(&fcport->state));
1787 atomic_read(&fcport->state)));
1788 1760
1789 if (atomic_read(&fcport->state) == FCS_ONLINE) 1761 if (atomic_read(&fcport->state) == FCS_ONLINE)
1790 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1762 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -1804,15 +1776,13 @@ check_scsi_status:
1804 1776
1805out: 1777out:
1806 if (logit) 1778 if (logit)
1807 DEBUG2(qla_printk(KERN_INFO, ha, 1779 ql_dbg(ql_dbg_io, vha, 0x3022,
1808 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " 1780 "FCP command status: 0x%x-0x%x (0x%x) "
1809 "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1781 "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1810 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, 1782 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
1811 cp->device->id, cp->device->lun, comp_status, scsi_status, 1783 comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
1812 cp->result, fcport->d_id.b.domain, fcport->d_id.b.area, 1784 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1813 fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1], 1785 resid_len, fw_resid_len);
1814 cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len,
1815 fw_resid_len));
1816 1786
1817 if (rsp->status_srb == NULL) 1787 if (rsp->status_srb == NULL)
1818 qla2x00_sp_compl(ha, sp); 1788 qla2x00_sp_compl(ha, sp);
@@ -1830,16 +1800,15 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1830{ 1800{
1831 uint8_t sense_sz = 0; 1801 uint8_t sense_sz = 0;
1832 struct qla_hw_data *ha = rsp->hw; 1802 struct qla_hw_data *ha = rsp->hw;
1803 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1833 srb_t *sp = rsp->status_srb; 1804 srb_t *sp = rsp->status_srb;
1834 struct scsi_cmnd *cp; 1805 struct scsi_cmnd *cp;
1835 1806
1836 if (sp != NULL && sp->request_sense_length != 0) { 1807 if (sp != NULL && sp->request_sense_length != 0) {
1837 cp = sp->cmd; 1808 cp = sp->cmd;
1838 if (cp == NULL) { 1809 if (cp == NULL) {
1839 DEBUG2(printk("%s(): Cmd already returned back to OS " 1810 ql_log(ql_log_warn, vha, 0x3025,
1840 "sp=%p.\n", __func__, sp)); 1811 "cmd is NULL: already returned to OS (sp=%p).\n",
1841 qla_printk(KERN_INFO, ha,
1842 "cmd is NULL: already returned to OS (sp=%p)\n",
1843 sp); 1812 sp);
1844 1813
1845 rsp->status_srb = NULL; 1814 rsp->status_srb = NULL;
@@ -1856,7 +1825,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1856 if (IS_FWI2_CAPABLE(ha)) 1825 if (IS_FWI2_CAPABLE(ha))
1857 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 1826 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1858 memcpy(sp->request_sense_ptr, pkt->data, sense_sz); 1827 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1859 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz)); 1828 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1829 sp->request_sense_ptr, sense_sz);
1860 1830
1861 sp->request_sense_ptr += sense_sz; 1831 sp->request_sense_ptr += sense_sz;
1862 sp->request_sense_length -= sense_sz; 1832 sp->request_sense_length -= sense_sz;
@@ -1882,21 +1852,25 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1882 uint32_t handle = LSW(pkt->handle); 1852 uint32_t handle = LSW(pkt->handle);
1883 uint16_t que = MSW(pkt->handle); 1853 uint16_t que = MSW(pkt->handle);
1884 struct req_que *req = ha->req_q_map[que]; 1854 struct req_que *req = ha->req_q_map[que];
1885#if defined(QL_DEBUG_LEVEL_2) 1855
1886 if (pkt->entry_status & RF_INV_E_ORDER) 1856 if (pkt->entry_status & RF_INV_E_ORDER)
1887 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1857 ql_dbg(ql_dbg_async, vha, 0x502a,
1858 "Invalid Entry Order.\n");
1888 else if (pkt->entry_status & RF_INV_E_COUNT) 1859 else if (pkt->entry_status & RF_INV_E_COUNT)
1889 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__); 1860 ql_dbg(ql_dbg_async, vha, 0x502b,
1861 "Invalid Entry Count.\n");
1890 else if (pkt->entry_status & RF_INV_E_PARAM) 1862 else if (pkt->entry_status & RF_INV_E_PARAM)
1891 qla_printk(KERN_ERR, ha, 1863 ql_dbg(ql_dbg_async, vha, 0x502c,
1892 "%s: Invalid Entry Parameter\n", __func__); 1864 "Invalid Entry Parameter.\n");
1893 else if (pkt->entry_status & RF_INV_E_TYPE) 1865 else if (pkt->entry_status & RF_INV_E_TYPE)
1894 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__); 1866 ql_dbg(ql_dbg_async, vha, 0x502d,
1867 "Invalid Entry Type.\n");
1895 else if (pkt->entry_status & RF_BUSY) 1868 else if (pkt->entry_status & RF_BUSY)
1896 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__); 1869 ql_dbg(ql_dbg_async, vha, 0x502e,
1870 "Busy.\n");
1897 else 1871 else
1898 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__); 1872 ql_dbg(ql_dbg_async, vha, 0x502f,
1899#endif 1873 "UNKNOWN flag error.\n");
1900 1874
1901 /* Validate handle. */ 1875 /* Validate handle. */
1902 if (handle < MAX_OUTSTANDING_COMMANDS) 1876 if (handle < MAX_OUTSTANDING_COMMANDS)
@@ -1923,10 +1897,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1923 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1897 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1924 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 1898 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1925 || pkt->entry_type == COMMAND_TYPE_6) { 1899 || pkt->entry_type == COMMAND_TYPE_6) {
1926 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1900 ql_log(ql_log_warn, vha, 0x5030,
1927 vha->host_no)); 1901 "Error entry - invalid handle.\n");
1928 qla_printk(KERN_WARNING, ha,
1929 "Error entry - invalid handle\n");
1930 1902
1931 if (IS_QLA82XX(ha)) 1903 if (IS_QLA82XX(ha))
1932 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1904 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1960,11 +1932,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1960 } 1932 }
1961 1933
1962 if (ha->mcp) { 1934 if (ha->mcp) {
1963 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 1935 ql_dbg(ql_dbg_async, vha, 0x504d,
1964 __func__, vha->host_no, ha->mcp->mb[0])); 1936 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1965 } else { 1937 } else {
1966 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 1938 ql_dbg(ql_dbg_async, vha, 0x504e,
1967 __func__, vha->host_no)); 1939 "MBX pointer ERROR.\n");
1968 } 1940 }
1969} 1941}
1970 1942
@@ -1993,8 +1965,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1993 } 1965 }
1994 1966
1995 if (pkt->entry_status != 0) { 1967 if (pkt->entry_status != 0) {
1996 DEBUG3(printk(KERN_INFO 1968 ql_dbg(ql_dbg_async, vha, 0x5029,
1997 "scsi(%ld): Process error entry.\n", vha->host_no)); 1969 "Process error entry.\n");
1998 1970
1999 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 1971 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2000 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1972 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2030,10 +2002,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2030 break; 2002 break;
2031 default: 2003 default:
2032 /* Type Not Supported. */ 2004 /* Type Not Supported. */
2033 DEBUG4(printk(KERN_WARNING 2005 ql_dbg(ql_dbg_async, vha, 0x5042,
2034 "scsi(%ld): Received unknown response pkt type %x " 2006 "Received unknown response pkt type %x "
2035 "entry status=%x.\n", 2007 "entry status=%x.\n",
2036 vha->host_no, pkt->entry_type, pkt->entry_status)); 2008 pkt->entry_type, pkt->entry_status);
2037 break; 2009 break;
2038 } 2010 }
2039 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2011 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2088,7 +2060,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2088 2060
2089next_test: 2061next_test:
2090 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) 2062 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2091 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n"); 2063 ql_log(ql_log_info, vha, 0x504c,
2064 "Additional code -- 0x55AA.\n");
2092 2065
2093done: 2066done:
2094 WRT_REG_DWORD(&reg->iobase_window, 0x0000); 2067 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
@@ -2121,7 +2094,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
2121 rsp = (struct rsp_que *) dev_id; 2094 rsp = (struct rsp_que *) dev_id;
2122 if (!rsp) { 2095 if (!rsp) {
2123 printk(KERN_INFO 2096 printk(KERN_INFO
2124 "%s(): NULL response queue pointer\n", __func__); 2097 "%s(): NULL response queue pointer.\n", __func__);
2125 return IRQ_NONE; 2098 return IRQ_NONE;
2126 } 2099 }
2127 2100
@@ -2142,8 +2115,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
2142 2115
2143 hccr = RD_REG_DWORD(&reg->hccr); 2116 hccr = RD_REG_DWORD(&reg->hccr);
2144 2117
2145 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 2118 ql_log(ql_log_warn, vha, 0x504b,
2146 "Dumping firmware!\n", hccr); 2119 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2120 hccr);
2147 2121
2148 qla2xxx_check_risc_status(vha); 2122 qla2xxx_check_risc_status(vha);
2149 2123
@@ -2174,9 +2148,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
2174 qla24xx_process_response_queue(vha, rsp); 2148 qla24xx_process_response_queue(vha, rsp);
2175 break; 2149 break;
2176 default: 2150 default:
2177 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2151 ql_dbg(ql_dbg_async, vha, 0x504f,
2178 "(%d).\n", 2152 "Unrecognized interrupt type (%d).\n", stat * 0xff);
2179 vha->host_no, stat & 0xff));
2180 break; 2153 break;
2181 } 2154 }
2182 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2155 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2205,7 +2178,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2205 rsp = (struct rsp_que *) dev_id; 2178 rsp = (struct rsp_que *) dev_id;
2206 if (!rsp) { 2179 if (!rsp) {
2207 printk(KERN_INFO 2180 printk(KERN_INFO
2208 "%s(): NULL response queue pointer\n", __func__); 2181 "%s(): NULL response queue pointer.\n", __func__);
2209 return IRQ_NONE; 2182 return IRQ_NONE;
2210 } 2183 }
2211 ha = rsp->hw; 2184 ha = rsp->hw;
@@ -2235,7 +2208,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2235 rsp = (struct rsp_que *) dev_id; 2208 rsp = (struct rsp_que *) dev_id;
2236 if (!rsp) { 2209 if (!rsp) {
2237 printk(KERN_INFO 2210 printk(KERN_INFO
2238 "%s(): NULL response queue pointer\n", __func__); 2211 "%s(): NULL response queue pointer.\n", __func__);
2239 return IRQ_NONE; 2212 return IRQ_NONE;
2240 } 2213 }
2241 ha = rsp->hw; 2214 ha = rsp->hw;
@@ -2268,8 +2241,8 @@ qla24xx_msix_default(int irq, void *dev_id)
2268 2241
2269 rsp = (struct rsp_que *) dev_id; 2242 rsp = (struct rsp_que *) dev_id;
2270 if (!rsp) { 2243 if (!rsp) {
2271 DEBUG(printk( 2244 printk(KERN_INFO
2272 "%s(): NULL response queue pointer\n", __func__)); 2245 "%s(): NULL response queue pointer.\n", __func__);
2273 return IRQ_NONE; 2246 return IRQ_NONE;
2274 } 2247 }
2275 ha = rsp->hw; 2248 ha = rsp->hw;
@@ -2286,8 +2259,9 @@ qla24xx_msix_default(int irq, void *dev_id)
2286 2259
2287 hccr = RD_REG_DWORD(&reg->hccr); 2260 hccr = RD_REG_DWORD(&reg->hccr);
2288 2261
2289 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 2262 ql_log(ql_log_info, vha, 0x5050,
2290 "Dumping firmware!\n", hccr); 2263 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2264 hccr);
2291 2265
2292 qla2xxx_check_risc_status(vha); 2266 qla2xxx_check_risc_status(vha);
2293 2267
@@ -2318,9 +2292,8 @@ qla24xx_msix_default(int irq, void *dev_id)
2318 qla24xx_process_response_queue(vha, rsp); 2292 qla24xx_process_response_queue(vha, rsp);
2319 break; 2293 break;
2320 default: 2294 default:
2321 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2295 ql_dbg(ql_dbg_async, vha, 0x5051,
2322 "(%d).\n", 2296 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2323 vha->host_no, stat & 0xff));
2324 break; 2297 break;
2325 } 2298 }
2326 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2299 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2358,6 +2331,7 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
2358{ 2331{
2359 int i; 2332 int i;
2360 struct qla_msix_entry *qentry; 2333 struct qla_msix_entry *qentry;
2334 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2361 2335
2362 for (i = 0; i < ha->msix_count; i++) { 2336 for (i = 0; i < ha->msix_count; i++) {
2363 qentry = &ha->msix_entries[i]; 2337 qentry = &ha->msix_entries[i];
@@ -2368,6 +2342,8 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
2368 kfree(ha->msix_entries); 2342 kfree(ha->msix_entries);
2369 ha->msix_entries = NULL; 2343 ha->msix_entries = NULL;
2370 ha->flags.msix_enabled = 0; 2344 ha->flags.msix_enabled = 0;
2345 ql_dbg(ql_dbg_init, vha, 0x0042,
2346 "Disabled the MSI.\n");
2371} 2347}
2372 2348
2373static int 2349static int
@@ -2377,11 +2353,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2377 int i, ret; 2353 int i, ret;
2378 struct msix_entry *entries; 2354 struct msix_entry *entries;
2379 struct qla_msix_entry *qentry; 2355 struct qla_msix_entry *qentry;
2356 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2380 2357
2381 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2358 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2382 GFP_KERNEL); 2359 GFP_KERNEL);
2383 if (!entries) 2360 if (!entries) {
2361 ql_log(ql_log_warn, vha, 0x00bc,
2362 "Failed to allocate memory for msix_entry.\n");
2384 return -ENOMEM; 2363 return -ENOMEM;
2364 }
2385 2365
2386 for (i = 0; i < ha->msix_count; i++) 2366 for (i = 0; i < ha->msix_count; i++)
2387 entries[i].entry = i; 2367 entries[i].entry = i;
@@ -2391,16 +2371,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2391 if (ret < MIN_MSIX_COUNT) 2371 if (ret < MIN_MSIX_COUNT)
2392 goto msix_failed; 2372 goto msix_failed;
2393 2373
2394 qla_printk(KERN_WARNING, ha, 2374 ql_log(ql_log_warn, vha, 0x00c6,
2395 "MSI-X: Failed to enable support -- %d/%d\n" 2375 "MSI-X: Failed to enable support "
2396 " Retry with %d vectors\n", ha->msix_count, ret, ret); 2376 "-- %d/%d\n Retry with %d vectors.\n",
2377 ha->msix_count, ret, ret);
2397 ha->msix_count = ret; 2378 ha->msix_count = ret;
2398 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2379 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2399 if (ret) { 2380 if (ret) {
2400msix_failed: 2381msix_failed:
2401 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" 2382 ql_log(ql_log_fatal, vha, 0x00c7,
2402 " support, giving up -- %d/%d\n", 2383 "MSI-X: Failed to enable support, "
2403 ha->msix_count, ret); 2384 "giving up -- %d/%d.\n",
2385 ha->msix_count, ret);
2404 goto msix_out; 2386 goto msix_out;
2405 } 2387 }
2406 ha->max_rsp_queues = ha->msix_count - 1; 2388 ha->max_rsp_queues = ha->msix_count - 1;
@@ -2408,6 +2390,8 @@ msix_failed:
2408 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2390 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2409 ha->msix_count, GFP_KERNEL); 2391 ha->msix_count, GFP_KERNEL);
2410 if (!ha->msix_entries) { 2392 if (!ha->msix_entries) {
2393 ql_log(ql_log_fatal, vha, 0x00c8,
2394 "Failed to allocate memory for ha->msix_entries.\n");
2411 ret = -ENOMEM; 2395 ret = -ENOMEM;
2412 goto msix_out; 2396 goto msix_out;
2413 } 2397 }
@@ -2434,9 +2418,9 @@ msix_failed:
2434 0, msix_entries[i].name, rsp); 2418 0, msix_entries[i].name, rsp);
2435 } 2419 }
2436 if (ret) { 2420 if (ret) {
2437 qla_printk(KERN_WARNING, ha, 2421 ql_log(ql_log_fatal, vha, 0x00cb,
2438 "MSI-X: Unable to register handler -- %x/%d.\n", 2422 "MSI-X: unable to register handler -- %x/%d.\n",
2439 qentry->vector, ret); 2423 qentry->vector, ret);
2440 qla24xx_disable_msix(ha); 2424 qla24xx_disable_msix(ha);
2441 ha->mqenable = 0; 2425 ha->mqenable = 0;
2442 goto msix_out; 2426 goto msix_out;
@@ -2449,6 +2433,12 @@ msix_failed:
2449 /* Enable MSI-X vector for response queue update for queue 0 */ 2433 /* Enable MSI-X vector for response queue update for queue 0 */
2450 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2434 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2451 ha->mqenable = 1; 2435 ha->mqenable = 1;
2436 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2437 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2438 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2439 ql_dbg(ql_dbg_init, vha, 0x0055,
2440 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2441 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2452 2442
2453msix_out: 2443msix_out:
2454 kfree(entries); 2444 kfree(entries);
@@ -2460,6 +2450,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2460{ 2450{
2461 int ret; 2451 int ret;
2462 device_reg_t __iomem *reg = ha->iobase; 2452 device_reg_t __iomem *reg = ha->iobase;
2453 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2463 2454
2464 /* If possible, enable MSI-X. */ 2455 /* If possible, enable MSI-X. */
2465 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2456 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
@@ -2470,30 +2461,30 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2470 (ha->pdev->subsystem_device == 0x7040 || 2461 (ha->pdev->subsystem_device == 0x7040 ||
2471 ha->pdev->subsystem_device == 0x7041 || 2462 ha->pdev->subsystem_device == 0x7041 ||
2472 ha->pdev->subsystem_device == 0x1705)) { 2463 ha->pdev->subsystem_device == 0x1705)) {
2473 DEBUG2(qla_printk(KERN_WARNING, ha, 2464 ql_log(ql_log_warn, vha, 0x0034,
2474 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n", 2465 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2475 ha->pdev->subsystem_vendor, 2466 ha->pdev->subsystem_vendor,
2476 ha->pdev->subsystem_device)); 2467 ha->pdev->subsystem_device);
2477 goto skip_msi; 2468 goto skip_msi;
2478 } 2469 }
2479 2470
2480 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 2471 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2481 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 2472 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2482 DEBUG2(qla_printk(KERN_WARNING, ha, 2473 ql_log(ql_log_warn, vha, 0x0035,
2483 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 2474 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2484 ha->pdev->revision, ha->fw_attributes)); 2475 ha->pdev->revision, ha->fw_attributes);
2485 goto skip_msix; 2476 goto skip_msix;
2486 } 2477 }
2487 2478
2488 ret = qla24xx_enable_msix(ha, rsp); 2479 ret = qla24xx_enable_msix(ha, rsp);
2489 if (!ret) { 2480 if (!ret) {
2490 DEBUG2(qla_printk(KERN_INFO, ha, 2481 ql_dbg(ql_dbg_init, vha, 0x0036,
2491 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 2482 "MSI-X: Enabled (0x%X, 0x%X).\n",
2492 ha->fw_attributes)); 2483 ha->chip_revision, ha->fw_attributes);
2493 goto clear_risc_ints; 2484 goto clear_risc_ints;
2494 } 2485 }
2495 qla_printk(KERN_WARNING, ha, 2486 ql_log(ql_log_info, vha, 0x0037,
2496 "MSI-X: Falling back-to MSI mode -- %d.\n", ret); 2487 "MSI-X Falling back-to MSI mode -%d.\n", ret);
2497skip_msix: 2488skip_msix:
2498 2489
2499 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2490 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2502,18 +2493,19 @@ skip_msix:
2502 2493
2503 ret = pci_enable_msi(ha->pdev); 2494 ret = pci_enable_msi(ha->pdev);
2504 if (!ret) { 2495 if (!ret) {
2505 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n")); 2496 ql_dbg(ql_dbg_init, vha, 0x0038,
2497 "MSI: Enabled.\n");
2506 ha->flags.msi_enabled = 1; 2498 ha->flags.msi_enabled = 1;
2507 } else 2499 } else
2508 qla_printk(KERN_WARNING, ha, 2500 ql_log(ql_log_warn, vha, 0x0039,
2509 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 2501 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2510skip_msi: 2502skip_msi:
2511 2503
2512 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2504 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2513 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 2505 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2514 QLA2XXX_DRIVER_NAME, rsp); 2506 QLA2XXX_DRIVER_NAME, rsp);
2515 if (ret) { 2507 if (ret) {
2516 qla_printk(KERN_WARNING, ha, 2508 ql_log(ql_log_warn, vha, 0x003a,
2517 "Failed to reserve interrupt %d already in use.\n", 2509 "Failed to reserve interrupt %d already in use.\n",
2518 ha->pdev->irq); 2510 ha->pdev->irq);
2519 goto fail; 2511 goto fail;
@@ -2563,13 +2555,14 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2563 struct qla_hw_data *ha = rsp->hw; 2555 struct qla_hw_data *ha = rsp->hw;
2564 struct qla_init_msix_entry *intr = &msix_entries[2]; 2556 struct qla_init_msix_entry *intr = &msix_entries[2];
2565 struct qla_msix_entry *msix = rsp->msix; 2557 struct qla_msix_entry *msix = rsp->msix;
2558 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2566 int ret; 2559 int ret;
2567 2560
2568 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 2561 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2569 if (ret) { 2562 if (ret) {
2570 qla_printk(KERN_WARNING, ha, 2563 ql_log(ql_log_fatal, vha, 0x00e6,
2571 "MSI-X: Unable to register handler -- %x/%d.\n", 2564 "MSI-X: Unable to register handler -- %x/%d.\n",
2572 msix->vector, ret); 2565 msix->vector, ret);
2573 return ret; 2566 return ret;
2574 } 2567 }
2575 msix->have_irq = 1; 2568 msix->have_irq = 1;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index c26f0acdfecc..f7604ea1af83 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,14 +46,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
46 struct qla_hw_data *ha = vha->hw; 46 struct qla_hw_data *ha = vha->hw;
47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
48 48
49 if (ha->pdev->error_state > pci_channel_io_frozen) 49 ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
50
51 if (ha->pdev->error_state > pci_channel_io_frozen) {
52 ql_log(ql_log_warn, base_vha, 0x1001,
53 "error_state is greater than pci_channel_io_frozen, "
54 "exiting.\n");
50 return QLA_FUNCTION_TIMEOUT; 55 return QLA_FUNCTION_TIMEOUT;
56 }
51 57
52 if (vha->device_flags & DFLG_DEV_FAILED) { 58 if (vha->device_flags & DFLG_DEV_FAILED) {
53 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 59 ql_log(ql_log_warn, base_vha, 0x1002,
54 "%s(%ld): Device in failed state, " 60 "Device in failed state, exiting.\n");
55 "timeout MBX Exiting.\n",
56 __func__, base_vha->host_no));
57 return QLA_FUNCTION_TIMEOUT; 61 return QLA_FUNCTION_TIMEOUT;
58 } 62 }
59 63
@@ -63,17 +67,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
63 rval = QLA_SUCCESS; 67 rval = QLA_SUCCESS;
64 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 68 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
65 69
66 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
67 70
68 if (ha->flags.pci_channel_io_perm_failure) { 71 if (ha->flags.pci_channel_io_perm_failure) {
69 DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX " 72 ql_log(ql_log_warn, base_vha, 0x1003,
70 "Exiting.\n", __func__, vha->host_no)); 73 "Perm failure on EEH timeout MBX, exiting.\n");
71 return QLA_FUNCTION_TIMEOUT; 74 return QLA_FUNCTION_TIMEOUT;
72 } 75 }
73 76
74 if (ha->flags.isp82xx_fw_hung) { 77 if (ha->flags.isp82xx_fw_hung) {
75 /* Setting Link-Down error */ 78 /* Setting Link-Down error */
76 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 79 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
80 ql_log(ql_log_warn, base_vha, 0x1004,
81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
77 rval = QLA_FUNCTION_FAILED; 82 rval = QLA_FUNCTION_FAILED;
78 goto premature_exit; 83 goto premature_exit;
79 } 84 }
@@ -85,8 +90,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
85 */ 90 */
86 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { 91 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
87 /* Timeout occurred. Return error. */ 92 /* Timeout occurred. Return error. */
88 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 93 ql_log(ql_log_warn, base_vha, 0x1005,
89 "Exiting.\n", __func__, base_vha->host_no)); 94 "Cmd access timeout, Exiting.\n");
90 return QLA_FUNCTION_TIMEOUT; 95 return QLA_FUNCTION_TIMEOUT;
91 } 96 }
92 97
@@ -94,8 +99,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
94 /* Save mailbox command for debug */ 99 /* Save mailbox command for debug */
95 ha->mcp = mcp; 100 ha->mcp = mcp;
96 101
97 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 102 ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
98 base_vha->host_no, mcp->mb[0])); 103 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
99 104
100 spin_lock_irqsave(&ha->hardware_lock, flags); 105 spin_lock_irqsave(&ha->hardware_lock, flags);
101 106
@@ -123,27 +128,30 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
123 iptr++; 128 iptr++;
124 } 129 }
125 130
126#if defined(QL_DEBUG_LEVEL_1) 131 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
127 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n", 132 "Loaded MBX registers (displayed in bytes) =.\n");
128 __func__, base_vha->host_no); 133 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
129 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16); 134 (uint8_t *)mcp->mb, 16);
130 printk("\n"); 135 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
131 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16); 136 ".\n");
132 printk("\n"); 137 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
133 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8); 138 ((uint8_t *)mcp->mb + 0x10), 16);
134 printk("\n"); 139 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
135 printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no, 140 ".\n");
136 optr); 141 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
137 qla2x00_dump_regs(base_vha); 142 ((uint8_t *)mcp->mb + 0x20), 8);
138#endif 143 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
144 "I/O Address = %p.\n", optr);
145 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
139 146
140 /* Issue set host interrupt command to send cmd out. */ 147 /* Issue set host interrupt command to send cmd out. */
141 ha->flags.mbox_int = 0; 148 ha->flags.mbox_int = 0;
142 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 149 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
143 150
144 /* Unlock mbx registers and wait for interrupt */ 151 /* Unlock mbx registers and wait for interrupt */
145 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " 152 ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
146 "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies)); 153 "Going to unlock irq & waiting for interrupts. "
154 "jiffies=%lx.\n", jiffies);
147 155
148 /* Wait for mbx cmd completion until timeout */ 156 /* Wait for mbx cmd completion until timeout */
149 157
@@ -155,9 +163,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
155 HINT_MBX_INT_PENDING) { 163 HINT_MBX_INT_PENDING) {
156 spin_unlock_irqrestore(&ha->hardware_lock, 164 spin_unlock_irqrestore(&ha->hardware_lock,
157 flags); 165 flags);
158 DEBUG2_3_11(printk(KERN_INFO 166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
159 "%s(%ld): Pending Mailbox timeout. " 167 "Pending mailbox timeout, exiting.\n");
160 "Exiting.\n", __func__, base_vha->host_no));
161 rval = QLA_FUNCTION_TIMEOUT; 168 rval = QLA_FUNCTION_TIMEOUT;
162 goto premature_exit; 169 goto premature_exit;
163 } 170 }
@@ -173,17 +180,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
173 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 180 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
174 181
175 } else { 182 } else {
176 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 183 ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
177 base_vha->host_no, command)); 184 "Cmd=%x Polling Mode.\n", command);
178 185
179 if (IS_QLA82XX(ha)) { 186 if (IS_QLA82XX(ha)) {
180 if (RD_REG_DWORD(&reg->isp82.hint) & 187 if (RD_REG_DWORD(&reg->isp82.hint) &
181 HINT_MBX_INT_PENDING) { 188 HINT_MBX_INT_PENDING) {
182 spin_unlock_irqrestore(&ha->hardware_lock, 189 spin_unlock_irqrestore(&ha->hardware_lock,
183 flags); 190 flags);
184 DEBUG2_3_11(printk(KERN_INFO 191 ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
185 "%s(%ld): Pending Mailbox timeout. " 192 "Pending mailbox timeout, exiting.\n");
186 "Exiting.\n", __func__, base_vha->host_no));
187 rval = QLA_FUNCTION_TIMEOUT; 193 rval = QLA_FUNCTION_TIMEOUT;
188 goto premature_exit; 194 goto premature_exit;
189 } 195 }
@@ -207,17 +213,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
207 command == MBC_LOAD_RISC_RAM_EXTENDED)) 213 command == MBC_LOAD_RISC_RAM_EXTENDED))
208 msleep(10); 214 msleep(10);
209 } /* while */ 215 } /* while */
210 DEBUG17(qla_printk(KERN_WARNING, ha, 216 ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
211 "Waited %d sec\n", 217 "Waited %d sec.\n",
212 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ))); 218 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
213 } 219 }
214 220
215 /* Check whether we timed out */ 221 /* Check whether we timed out */
216 if (ha->flags.mbox_int) { 222 if (ha->flags.mbox_int) {
217 uint16_t *iptr2; 223 uint16_t *iptr2;
218 224
219 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, 225 ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
220 base_vha->host_no, command)); 226 "Cmd=%x completed.\n", command);
221 227
222 /* Got interrupt. Clear the flag. */ 228 /* Got interrupt. Clear the flag. */
223 ha->flags.mbox_int = 0; 229 ha->flags.mbox_int = 0;
@@ -229,6 +235,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
229 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 235 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
230 ha->mcp = NULL; 236 ha->mcp = NULL;
231 rval = QLA_FUNCTION_FAILED; 237 rval = QLA_FUNCTION_FAILED;
238 ql_log(ql_log_warn, base_vha, 0x1015,
239 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
232 goto premature_exit; 240 goto premature_exit;
233 } 241 }
234 242
@@ -249,8 +257,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
249 } 257 }
250 } else { 258 } else {
251 259
252#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3) || \
253 defined(QL_DEBUG_LEVEL_11)
254 uint16_t mb0; 260 uint16_t mb0;
255 uint32_t ictrl; 261 uint32_t ictrl;
256 262
@@ -261,14 +267,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
261 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); 267 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
262 ictrl = RD_REG_WORD(&reg->isp.ictrl); 268 ictrl = RD_REG_WORD(&reg->isp.ictrl);
263 } 269 }
264 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", 270 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
265 __func__, base_vha->host_no, command); 271 "MBX Command timeout for cmd %x.\n", command);
266 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__, 272 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
267 base_vha->host_no, ictrl, jiffies); 273 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
268 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__, 274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
269 base_vha->host_no, mb0); 275 "mb[0] = 0x%x.\n", mb0);
270 qla2x00_dump_regs(base_vha); 276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
271#endif
272 277
273 rval = QLA_FUNCTION_TIMEOUT; 278 rval = QLA_FUNCTION_TIMEOUT;
274 } 279 }
@@ -279,8 +284,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
279 ha->mcp = NULL; 284 ha->mcp = NULL;
280 285
281 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { 286 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
282 DEBUG11(printk("%s(%ld): checking for additional resp " 287 ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
283 "interrupt.\n", __func__, base_vha->host_no)); 288 "Checking for additional resp interrupt.\n");
284 289
285 /* polling mode for non isp_abort commands. */ 290 /* polling mode for non isp_abort commands. */
286 qla2x00_poll(ha->rsp_q_map[0]); 291 qla2x00_poll(ha->rsp_q_map[0]);
@@ -291,38 +296,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
291 if (!io_lock_on || (mcp->flags & IOCTL_CMD) || 296 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
292 ha->flags.eeh_busy) { 297 ha->flags.eeh_busy) {
293 /* not in dpc. schedule it for dpc to take over. */ 298 /* not in dpc. schedule it for dpc to take over. */
294 DEBUG(printk("%s(%ld): timeout schedule " 299 ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
295 "isp_abort_needed.\n", __func__, 300 "Timeout, schedule isp_abort_needed.\n");
296 base_vha->host_no));
297 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
298 "isp_abort_needed.\n", __func__,
299 base_vha->host_no));
300 301
301 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 302 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
302 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 303 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
303 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 304 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
304 305
305 qla_printk(KERN_WARNING, ha, 306 ql_log(ql_log_info, base_vha, 0x101c,
306 "Mailbox command timeout occurred. " 307 "Mailbox cmd timeout occured. "
307 "Scheduling ISP " "abort. eeh_busy: 0x%x\n", 308 "Scheduling ISP abort eeh_busy=0x%x.\n",
308 ha->flags.eeh_busy); 309 ha->flags.eeh_busy);
309 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 310 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
310 qla2xxx_wake_dpc(vha); 311 qla2xxx_wake_dpc(vha);
311 } 312 }
312 } else if (!abort_active) { 313 } else if (!abort_active) {
313 /* call abort directly since we are in the DPC thread */ 314 /* call abort directly since we are in the DPC thread */
314 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 315 ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
315 __func__, base_vha->host_no)); 316 "Timeout, calling abort_isp.\n");
316 DEBUG2_3_11(printk("%s(%ld): timeout calling "
317 "abort_isp\n", __func__, base_vha->host_no));
318 317
319 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 318 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
320 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 319 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
321 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 320 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
322 321
323 qla_printk(KERN_WARNING, ha, 322 ql_log(ql_log_info, base_vha, 0x101e,
324 "Mailbox command timeout occurred. " 323 "Mailbox cmd timeout occured. "
325 "Issuing ISP abort.\n"); 324 "Scheduling ISP abort.\n");
326 325
327 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 326 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
328 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 327 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -332,11 +331,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
332 &vha->dpc_flags); 331 &vha->dpc_flags);
333 } 332 }
334 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 333 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
335 DEBUG(printk("%s(%ld): finished abort_isp\n", 334 ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
336 __func__, vha->host_no)); 335 "Finished abort_isp.\n");
337 DEBUG2_3_11(printk(
338 "%s(%ld): finished abort_isp\n",
339 __func__, vha->host_no));
340 } 336 }
341 } 337 }
342 } 338 }
@@ -346,12 +342,11 @@ premature_exit:
346 complete(&ha->mbx_cmd_comp); 342 complete(&ha->mbx_cmd_comp);
347 343
348 if (rval) { 344 if (rval) {
349 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 345 ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
350 "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no, 346 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
351 mcp->mb[0], mcp->mb[1], mcp->mb[2], command)); 347 mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
352 } else { 348 } else {
353 DEBUG11(printk("%s(%ld): done.\n", __func__, 349 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
354 base_vha->host_no));
355 } 350 }
356 351
357 return rval; 352 return rval;
@@ -366,7 +361,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
366 mbx_cmd_t mc; 361 mbx_cmd_t mc;
367 mbx_cmd_t *mcp = &mc; 362 mbx_cmd_t *mcp = &mc;
368 363
369 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 364 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
370 365
371 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 366 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
372 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 367 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -397,10 +392,10 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
397 rval = qla2x00_mailbox_command(vha, mcp); 392 rval = qla2x00_mailbox_command(vha, mcp);
398 393
399 if (rval != QLA_SUCCESS) { 394 if (rval != QLA_SUCCESS) {
400 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 395 ql_dbg(ql_dbg_mbx, vha, 0x1023,
401 vha->host_no, rval, mcp->mb[0])); 396 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
402 } else { 397 } else {
403 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 398 ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
404 } 399 }
405 400
406 return rval; 401 return rval;
@@ -430,7 +425,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
430 mbx_cmd_t mc; 425 mbx_cmd_t mc;
431 mbx_cmd_t *mcp = &mc; 426 mbx_cmd_t *mcp = &mc;
432 427
433 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 428 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
434 429
435 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 430 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
436 mcp->out_mb = MBX_0; 431 mcp->out_mb = MBX_0;
@@ -461,15 +456,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
461 rval = qla2x00_mailbox_command(vha, mcp); 456 rval = qla2x00_mailbox_command(vha, mcp);
462 457
463 if (rval != QLA_SUCCESS) { 458 if (rval != QLA_SUCCESS) {
464 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 459 ql_dbg(ql_dbg_mbx, vha, 0x1026,
465 vha->host_no, rval, mcp->mb[0])); 460 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
466 } else { 461 } else {
467 if (IS_FWI2_CAPABLE(ha)) { 462 if (IS_FWI2_CAPABLE(ha)) {
468 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 463 ql_dbg(ql_dbg_mbx, vha, 0x1027,
469 __func__, vha->host_no, mcp->mb[1])); 464 "Done exchanges=%x.\n", mcp->mb[1]);
470 } else { 465 } else {
471 DEBUG11(printk("%s(%ld): done.\n", __func__, 466 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
472 vha->host_no));
473 } 467 }
474 } 468 }
475 469
@@ -501,7 +495,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
501 mbx_cmd_t mc; 495 mbx_cmd_t mc;
502 mbx_cmd_t *mcp = &mc; 496 mbx_cmd_t *mcp = &mc;
503 497
504 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 498 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
505 499
506 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 500 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
507 mcp->out_mb = MBX_0; 501 mcp->out_mb = MBX_0;
@@ -535,11 +529,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
535failed: 529failed:
536 if (rval != QLA_SUCCESS) { 530 if (rval != QLA_SUCCESS) {
537 /*EMPTY*/ 531 /*EMPTY*/
538 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 532 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
539 vha->host_no, rval));
540 } else { 533 } else {
541 /*EMPTY*/ 534 /*EMPTY*/
542 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 535 ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
543 } 536 }
544 return rval; 537 return rval;
545} 538}
@@ -565,7 +558,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
565 mbx_cmd_t mc; 558 mbx_cmd_t mc;
566 mbx_cmd_t *mcp = &mc; 559 mbx_cmd_t *mcp = &mc;
567 560
568 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 561 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
569 562
570 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 563 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
571 mcp->out_mb = MBX_0; 564 mcp->out_mb = MBX_0;
@@ -576,15 +569,14 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
576 569
577 if (rval != QLA_SUCCESS) { 570 if (rval != QLA_SUCCESS) {
578 /*EMPTY*/ 571 /*EMPTY*/
579 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 572 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
580 vha->host_no, rval));
581 } else { 573 } else {
582 fwopts[0] = mcp->mb[0]; 574 fwopts[0] = mcp->mb[0];
583 fwopts[1] = mcp->mb[1]; 575 fwopts[1] = mcp->mb[1];
584 fwopts[2] = mcp->mb[2]; 576 fwopts[2] = mcp->mb[2];
585 fwopts[3] = mcp->mb[3]; 577 fwopts[3] = mcp->mb[3];
586 578
587 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 579 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
588 } 580 }
589 581
590 return rval; 582 return rval;
@@ -612,7 +604,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
612 mbx_cmd_t mc; 604 mbx_cmd_t mc;
613 mbx_cmd_t *mcp = &mc; 605 mbx_cmd_t *mcp = &mc;
614 606
615 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 607 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
616 608
617 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 609 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
618 mcp->mb[1] = fwopts[1]; 610 mcp->mb[1] = fwopts[1];
@@ -636,11 +628,11 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
636 628
637 if (rval != QLA_SUCCESS) { 629 if (rval != QLA_SUCCESS) {
638 /*EMPTY*/ 630 /*EMPTY*/
639 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__, 631 ql_dbg(ql_dbg_mbx, vha, 0x1030,
640 vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 632 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
641 } else { 633 } else {
642 /*EMPTY*/ 634 /*EMPTY*/
643 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 635 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
644 } 636 }
645 637
646 return rval; 638 return rval;
@@ -668,7 +660,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
668 mbx_cmd_t mc; 660 mbx_cmd_t mc;
669 mbx_cmd_t *mcp = &mc; 661 mbx_cmd_t *mcp = &mc;
670 662
671 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no)); 663 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
672 664
673 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 665 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
674 mcp->mb[1] = 0xAAAA; 666 mcp->mb[1] = 0xAAAA;
@@ -695,12 +687,10 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
695 687
696 if (rval != QLA_SUCCESS) { 688 if (rval != QLA_SUCCESS) {
697 /*EMPTY*/ 689 /*EMPTY*/
698 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", 690 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
699 vha->host_no, rval));
700 } else { 691 } else {
701 /*EMPTY*/ 692 /*EMPTY*/
702 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", 693 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
703 vha->host_no));
704 } 694 }
705 695
706 return rval; 696 return rval;
@@ -728,7 +718,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
728 mbx_cmd_t mc; 718 mbx_cmd_t mc;
729 mbx_cmd_t *mcp = &mc; 719 mbx_cmd_t *mcp = &mc;
730 720
731 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 721 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
732 722
733 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 723 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
734 mcp->out_mb = MBX_0; 724 mcp->out_mb = MBX_0;
@@ -749,11 +739,11 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
749 rval = qla2x00_mailbox_command(vha, mcp); 739 rval = qla2x00_mailbox_command(vha, mcp);
750 740
751 if (rval != QLA_SUCCESS) { 741 if (rval != QLA_SUCCESS) {
752 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 742 ql_dbg(ql_dbg_mbx, vha, 0x1036,
753 vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ? 743 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
754 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])); 744 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
755 } else { 745 } else {
756 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 746 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
757 } 747 }
758 748
759 return rval; 749 return rval;
@@ -785,6 +775,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
785 mbx_cmd_t mc; 775 mbx_cmd_t mc;
786 mbx_cmd_t *mcp = &mc; 776 mbx_cmd_t *mcp = &mc;
787 777
778 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
779
788 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 780 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
789 mcp->mb[1] = 0; 781 mcp->mb[1] = 0;
790 mcp->mb[2] = MSW(phys_addr); 782 mcp->mb[2] = MSW(phys_addr);
@@ -799,14 +791,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
799 791
800 if (rval != QLA_SUCCESS) { 792 if (rval != QLA_SUCCESS) {
801 /*EMPTY*/ 793 /*EMPTY*/
802 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 794 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
803 vha->host_no, rval));
804 } else { 795 } else {
805 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 796 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
806 797
807 /* Mask reserved bits. */ 798 /* Mask reserved bits. */
808 sts_entry->entry_status &= 799 sts_entry->entry_status &=
809 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 800 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
801 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
810 } 802 }
811 803
812 return rval; 804 return rval;
@@ -847,7 +839,7 @@ qla2x00_abort_command(srb_t *sp)
847 struct qla_hw_data *ha = vha->hw; 839 struct qla_hw_data *ha = vha->hw;
848 struct req_que *req = vha->req; 840 struct req_que *req = vha->req;
849 841
850 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 842 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
851 843
852 spin_lock_irqsave(&ha->hardware_lock, flags); 844 spin_lock_irqsave(&ha->hardware_lock, flags);
853 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 845 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -876,11 +868,9 @@ qla2x00_abort_command(srb_t *sp)
876 rval = qla2x00_mailbox_command(vha, mcp); 868 rval = qla2x00_mailbox_command(vha, mcp);
877 869
878 if (rval != QLA_SUCCESS) { 870 if (rval != QLA_SUCCESS) {
879 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 871 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
880 vha->host_no, rval));
881 } else { 872 } else {
882 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 873 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
883 vha->host_no));
884 } 874 }
885 875
886 return rval; 876 return rval;
@@ -896,10 +886,11 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
896 struct req_que *req; 886 struct req_que *req;
897 struct rsp_que *rsp; 887 struct rsp_que *rsp;
898 888
899 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
900
901 l = l; 889 l = l;
902 vha = fcport->vha; 890 vha = fcport->vha;
891
892 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
893
903 req = vha->hw->req_q_map[0]; 894 req = vha->hw->req_q_map[0];
904 rsp = req->rsp; 895 rsp = req->rsp;
905 mcp->mb[0] = MBC_ABORT_TARGET; 896 mcp->mb[0] = MBC_ABORT_TARGET;
@@ -919,18 +910,17 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
919 mcp->flags = 0; 910 mcp->flags = 0;
920 rval = qla2x00_mailbox_command(vha, mcp); 911 rval = qla2x00_mailbox_command(vha, mcp);
921 if (rval != QLA_SUCCESS) { 912 if (rval != QLA_SUCCESS) {
922 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 913 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
923 vha->host_no, rval));
924 } 914 }
925 915
926 /* Issue marker IOCB. */ 916 /* Issue marker IOCB. */
927 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0, 917 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
928 MK_SYNC_ID); 918 MK_SYNC_ID);
929 if (rval2 != QLA_SUCCESS) { 919 if (rval2 != QLA_SUCCESS) {
930 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 920 ql_dbg(ql_dbg_mbx, vha, 0x1040,
931 "(%x).\n", __func__, vha->host_no, rval2)); 921 "Failed to issue marker IOCB (%x).\n", rval2);
932 } else { 922 } else {
933 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 923 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
934 } 924 }
935 925
936 return rval; 926 return rval;
@@ -946,9 +936,10 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
946 struct req_que *req; 936 struct req_que *req;
947 struct rsp_que *rsp; 937 struct rsp_que *rsp;
948 938
949 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
950
951 vha = fcport->vha; 939 vha = fcport->vha;
940
941 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
942
952 req = vha->hw->req_q_map[0]; 943 req = vha->hw->req_q_map[0];
953 rsp = req->rsp; 944 rsp = req->rsp;
954 mcp->mb[0] = MBC_LUN_RESET; 945 mcp->mb[0] = MBC_LUN_RESET;
@@ -966,18 +957,17 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
966 mcp->flags = 0; 957 mcp->flags = 0;
967 rval = qla2x00_mailbox_command(vha, mcp); 958 rval = qla2x00_mailbox_command(vha, mcp);
968 if (rval != QLA_SUCCESS) { 959 if (rval != QLA_SUCCESS) {
969 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 960 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
970 vha->host_no, rval));
971 } 961 }
972 962
973 /* Issue marker IOCB. */ 963 /* Issue marker IOCB. */
974 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 964 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
975 MK_SYNC_ID_LUN); 965 MK_SYNC_ID_LUN);
976 if (rval2 != QLA_SUCCESS) { 966 if (rval2 != QLA_SUCCESS) {
977 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 967 ql_dbg(ql_dbg_mbx, vha, 0x1044,
978 "(%x).\n", __func__, vha->host_no, rval2)); 968 "Failed to issue marker IOCB (%x).\n", rval2);
979 } else { 969 } else {
980 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 970 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
981 } 971 }
982 972
983 return rval; 973 return rval;
@@ -1011,8 +1001,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1011 mbx_cmd_t mc; 1001 mbx_cmd_t mc;
1012 mbx_cmd_t *mcp = &mc; 1002 mbx_cmd_t *mcp = &mc;
1013 1003
1014 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", 1004 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
1015 vha->host_no));
1016 1005
1017 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1006 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1018 mcp->mb[9] = vha->vp_idx; 1007 mcp->mb[9] = vha->vp_idx;
@@ -1038,11 +1027,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1038 1027
1039 if (rval != QLA_SUCCESS) { 1028 if (rval != QLA_SUCCESS) {
1040 /*EMPTY*/ 1029 /*EMPTY*/
1041 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 1030 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1042 vha->host_no, rval));
1043 } else { 1031 } else {
1044 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 1032 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
1045 vha->host_no));
1046 1033
1047 if (IS_QLA8XXX_TYPE(vha->hw)) { 1034 if (IS_QLA8XXX_TYPE(vha->hw)) {
1048 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1035 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1083,8 +1070,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1083 mbx_cmd_t mc; 1070 mbx_cmd_t mc;
1084 mbx_cmd_t *mcp = &mc; 1071 mbx_cmd_t *mcp = &mc;
1085 1072
1086 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", 1073 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
1087 vha->host_no));
1088 1074
1089 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1075 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1090 mcp->out_mb = MBX_0; 1076 mcp->out_mb = MBX_0;
@@ -1095,8 +1081,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1095 1081
1096 if (rval != QLA_SUCCESS) { 1082 if (rval != QLA_SUCCESS) {
1097 /*EMPTY*/ 1083 /*EMPTY*/
1098 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", 1084 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1099 vha->host_no, mcp->mb[0])); 1085 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1100 } else { 1086 } else {
1101 /* Convert returned data and check our values. */ 1087 /* Convert returned data and check our values. */
1102 *r_a_tov = mcp->mb[3] / 2; 1088 *r_a_tov = mcp->mb[3] / 2;
@@ -1107,8 +1093,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1107 *tov = ratov; 1093 *tov = ratov;
1108 } 1094 }
1109 1095
1110 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d " 1096 ql_dbg(ql_dbg_mbx, vha, 0x104b,
1111 "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov)); 1097 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1112 } 1098 }
1113 1099
1114 return rval; 1100 return rval;
@@ -1139,8 +1125,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1139 mbx_cmd_t *mcp = &mc; 1125 mbx_cmd_t *mcp = &mc;
1140 struct qla_hw_data *ha = vha->hw; 1126 struct qla_hw_data *ha = vha->hw;
1141 1127
1142 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1128 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
1143 vha->host_no));
1144 1129
1145 if (IS_QLA82XX(ha) && ql2xdbwr) 1130 if (IS_QLA82XX(ha) && ql2xdbwr)
1146 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, 1131 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1174,13 +1159,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1174 1159
1175 if (rval != QLA_SUCCESS) { 1160 if (rval != QLA_SUCCESS) {
1176 /*EMPTY*/ 1161 /*EMPTY*/
1177 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " 1162 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1178 "mb0=%x.\n", 1163 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1179 vha->host_no, rval, mcp->mb[0]));
1180 } else { 1164 } else {
1181 /*EMPTY*/ 1165 /*EMPTY*/
1182 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", 1166 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
1183 vha->host_no));
1184 } 1167 }
1185 1168
1186 return rval; 1169 return rval;
@@ -1213,13 +1196,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1213 dma_addr_t pd_dma; 1196 dma_addr_t pd_dma;
1214 struct qla_hw_data *ha = vha->hw; 1197 struct qla_hw_data *ha = vha->hw;
1215 1198
1216 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1199 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
1217 1200
1218 pd24 = NULL; 1201 pd24 = NULL;
1219 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1202 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1220 if (pd == NULL) { 1203 if (pd == NULL) {
1221 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database " 1204 ql_log(ql_log_warn, vha, 0x1050,
1222 "structure.\n", __func__, vha->host_no)); 1205 "Failed to allocate port database structure.\n");
1223 return QLA_MEMORY_ALLOC_FAILED; 1206 return QLA_MEMORY_ALLOC_FAILED;
1224 } 1207 }
1225 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 1208 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1261,12 +1244,10 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1261 /* Check for logged in state. */ 1244 /* Check for logged in state. */
1262 if (pd24->current_login_state != PDS_PRLI_COMPLETE && 1245 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1263 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1246 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1264 DEBUG2(qla_printk(KERN_WARNING, ha, 1247 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1265 "scsi(%ld): Unable to verify login-state (%x/%x) " 1248 "Unable to verify login-state (%x/%x) for "
1266 " - portid=%02x%02x%02x.\n", vha->host_no, 1249 "loop_id %x.\n", pd24->current_login_state,
1267 pd24->current_login_state, pd24->last_login_state, 1250 pd24->last_login_state, fcport->loop_id);
1268 fcport->d_id.b.domain, fcport->d_id.b.area,
1269 fcport->d_id.b.al_pa));
1270 rval = QLA_FUNCTION_FAILED; 1251 rval = QLA_FUNCTION_FAILED;
1271 goto gpd_error_out; 1252 goto gpd_error_out;
1272 } 1253 }
@@ -1290,12 +1271,11 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1290 /* Check for logged in state. */ 1271 /* Check for logged in state. */
1291 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1272 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1292 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1273 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1293 DEBUG2(qla_printk(KERN_WARNING, ha, 1274 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1294 "scsi(%ld): Unable to verify login-state (%x/%x) " 1275 "Unable to verify login-state (%x/%x) - "
1295 " - portid=%02x%02x%02x.\n", vha->host_no, 1276 "portid=%02x%02x%02x.\n", pd->master_state,
1296 pd->master_state, pd->slave_state, 1277 pd->slave_state, fcport->d_id.b.domain,
1297 fcport->d_id.b.domain, fcport->d_id.b.area, 1278 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1298 fcport->d_id.b.al_pa));
1299 rval = QLA_FUNCTION_FAILED; 1279 rval = QLA_FUNCTION_FAILED;
1300 goto gpd_error_out; 1280 goto gpd_error_out;
1301 } 1281 }
@@ -1325,10 +1305,11 @@ gpd_error_out:
1325 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 1305 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1326 1306
1327 if (rval != QLA_SUCCESS) { 1307 if (rval != QLA_SUCCESS) {
1328 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 1308 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1329 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1309 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1310 mcp->mb[0], mcp->mb[1]);
1330 } else { 1311 } else {
1331 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1312 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
1332 } 1313 }
1333 1314
1334 return rval; 1315 return rval;
@@ -1357,8 +1338,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1357 mbx_cmd_t mc; 1338 mbx_cmd_t mc;
1358 mbx_cmd_t *mcp = &mc; 1339 mbx_cmd_t *mcp = &mc;
1359 1340
1360 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", 1341 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
1361 vha->host_no));
1362 1342
1363 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1343 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1364 mcp->out_mb = MBX_0; 1344 mcp->out_mb = MBX_0;
@@ -1381,12 +1361,10 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1381 1361
1382 if (rval != QLA_SUCCESS) { 1362 if (rval != QLA_SUCCESS) {
1383 /*EMPTY*/ 1363 /*EMPTY*/
1384 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " 1364 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1385 "failed=%x.\n", vha->host_no, rval));
1386 } else { 1365 } else {
1387 /*EMPTY*/ 1366 /*EMPTY*/
1388 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", 1367 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
1389 vha->host_no));
1390 } 1368 }
1391 1369
1392 return rval; 1370 return rval;
@@ -1418,8 +1396,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1418 mbx_cmd_t mc; 1396 mbx_cmd_t mc;
1419 mbx_cmd_t *mcp = &mc; 1397 mbx_cmd_t *mcp = &mc;
1420 1398
1421 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", 1399 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
1422 vha->host_no));
1423 1400
1424 mcp->mb[0] = MBC_GET_PORT_NAME; 1401 mcp->mb[0] = MBC_GET_PORT_NAME;
1425 mcp->mb[9] = vha->vp_idx; 1402 mcp->mb[9] = vha->vp_idx;
@@ -1439,8 +1416,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1439 1416
1440 if (rval != QLA_SUCCESS) { 1417 if (rval != QLA_SUCCESS) {
1441 /*EMPTY*/ 1418 /*EMPTY*/
1442 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", 1419 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1443 vha->host_no, rval));
1444 } else { 1420 } else {
1445 if (name != NULL) { 1421 if (name != NULL) {
1446 /* This function returns name in big endian. */ 1422 /* This function returns name in big endian. */
@@ -1454,8 +1430,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1454 name[7] = LSB(mcp->mb[7]); 1430 name[7] = LSB(mcp->mb[7]);
1455 } 1431 }
1456 1432
1457 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1433 ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
1458 vha->host_no));
1459 } 1434 }
1460 1435
1461 return rval; 1436 return rval;
@@ -1483,7 +1458,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1483 mbx_cmd_t mc; 1458 mbx_cmd_t mc;
1484 mbx_cmd_t *mcp = &mc; 1459 mbx_cmd_t *mcp = &mc;
1485 1460
1486 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1461 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
1487 1462
1488 if (IS_QLA8XXX_TYPE(vha->hw)) { 1463 if (IS_QLA8XXX_TYPE(vha->hw)) {
1489 /* Logout across all FCFs. */ 1464 /* Logout across all FCFs. */
@@ -1517,11 +1492,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1517 1492
1518 if (rval != QLA_SUCCESS) { 1493 if (rval != QLA_SUCCESS) {
1519 /*EMPTY*/ 1494 /*EMPTY*/
1520 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", 1495 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1521 __func__, vha->host_no, rval));
1522 } else { 1496 } else {
1523 /*EMPTY*/ 1497 /*EMPTY*/
1524 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1498 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
1525 } 1499 }
1526 1500
1527 return rval; 1501 return rval;
@@ -1553,12 +1527,11 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1553 mbx_cmd_t mc; 1527 mbx_cmd_t mc;
1554 mbx_cmd_t *mcp = &mc; 1528 mbx_cmd_t *mcp = &mc;
1555 1529
1556 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", 1530 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
1557 vha->host_no));
1558 1531
1559 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " 1532 ql_dbg(ql_dbg_mbx, vha, 0x105e,
1560 "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout, 1533 "Retry cnt=%d ratov=%d total tov=%d.\n",
1561 mcp->tov)); 1534 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1562 1535
1563 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 1536 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1564 mcp->mb[1] = cmd_size; 1537 mcp->mb[1] = cmd_size;
@@ -1575,13 +1548,12 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1575 1548
1576 if (rval != QLA_SUCCESS) { 1549 if (rval != QLA_SUCCESS) {
1577 /*EMPTY*/ 1550 /*EMPTY*/
1578 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1551 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1579 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1552 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1580 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1553 rval, mcp->mb[0], mcp->mb[1]);
1581 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1582 } else { 1554 } else {
1583 /*EMPTY*/ 1555 /*EMPTY*/
1584 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no)); 1556 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
1585 } 1557 }
1586 1558
1587 return rval; 1559 return rval;
@@ -1600,7 +1572,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1600 struct req_que *req; 1572 struct req_que *req;
1601 struct rsp_que *rsp; 1573 struct rsp_que *rsp;
1602 1574
1603 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1575 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
1604 1576
1605 if (ha->flags.cpu_affinity_enabled) 1577 if (ha->flags.cpu_affinity_enabled)
1606 req = ha->req_q_map[0]; 1578 req = ha->req_q_map[0];
@@ -1610,8 +1582,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1610 1582
1611 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1583 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1612 if (lg == NULL) { 1584 if (lg == NULL) {
1613 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1585 ql_log(ql_log_warn, vha, 0x1062,
1614 __func__, vha->host_no)); 1586 "Failed to allocate login IOCB.\n");
1615 return QLA_MEMORY_ALLOC_FAILED; 1587 return QLA_MEMORY_ALLOC_FAILED;
1616 } 1588 }
1617 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1589 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1631,21 +1603,21 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1631 lg->vp_index = vha->vp_idx; 1603 lg->vp_index = vha->vp_idx;
1632 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1604 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1633 if (rval != QLA_SUCCESS) { 1605 if (rval != QLA_SUCCESS) {
1634 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1606 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1635 "(%x).\n", __func__, vha->host_no, rval)); 1607 "Failed to issue login IOCB (%x).\n", rval);
1636 } else if (lg->entry_status != 0) { 1608 } else if (lg->entry_status != 0) {
1637 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1609 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1638 "-- error status (%x).\n", __func__, vha->host_no, 1610 "Failed to complete IOCB -- error status (%x).\n",
1639 lg->entry_status)); 1611 lg->entry_status);
1640 rval = QLA_FUNCTION_FAILED; 1612 rval = QLA_FUNCTION_FAILED;
1641 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1613 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1642 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1614 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1643 iop[1] = le32_to_cpu(lg->io_parameter[1]); 1615 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1644 1616
1645 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1617 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1646 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1618 "Failed to complete IOCB -- completion status (%x) "
1647 vha->host_no, le16_to_cpu(lg->comp_status), iop[0], 1619 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1648 iop[1])); 1620 iop[0], iop[1]);
1649 1621
1650 switch (iop[0]) { 1622 switch (iop[0]) {
1651 case LSC_SCODE_PORTID_USED: 1623 case LSC_SCODE_PORTID_USED:
@@ -1673,7 +1645,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1673 break; 1645 break;
1674 } 1646 }
1675 } else { 1647 } else {
1676 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1648 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
1677 1649
1678 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1650 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1679 1651
@@ -1728,7 +1700,7 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1728 mbx_cmd_t *mcp = &mc; 1700 mbx_cmd_t *mcp = &mc;
1729 struct qla_hw_data *ha = vha->hw; 1701 struct qla_hw_data *ha = vha->hw;
1730 1702
1731 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no)); 1703 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
1732 1704
1733 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1705 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1734 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1706 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1771,13 +1743,12 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1771 rval = QLA_SUCCESS; 1743 rval = QLA_SUCCESS;
1772 1744
1773 /*EMPTY*/ 1745 /*EMPTY*/
1774 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " 1746 ql_dbg(ql_dbg_mbx, vha, 0x1068,
1775 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval, 1747 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
1776 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 1748 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1777 } else { 1749 } else {
1778 /*EMPTY*/ 1750 /*EMPTY*/
1779 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", 1751 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
1780 vha->host_no));
1781 } 1752 }
1782 1753
1783 return rval; 1754 return rval;
@@ -1808,13 +1779,13 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1808 mbx_cmd_t *mcp = &mc; 1779 mbx_cmd_t *mcp = &mc;
1809 struct qla_hw_data *ha = vha->hw; 1780 struct qla_hw_data *ha = vha->hw;
1810 1781
1782 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
1783
1811 if (IS_FWI2_CAPABLE(ha)) 1784 if (IS_FWI2_CAPABLE(ha))
1812 return qla24xx_login_fabric(vha, fcport->loop_id, 1785 return qla24xx_login_fabric(vha, fcport->loop_id,
1813 fcport->d_id.b.domain, fcport->d_id.b.area, 1786 fcport->d_id.b.domain, fcport->d_id.b.area,
1814 fcport->d_id.b.al_pa, mb_ret, opt); 1787 fcport->d_id.b.al_pa, mb_ret, opt);
1815 1788
1816 DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1817
1818 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 1789 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1819 if (HAS_EXTENDED_IDS(ha)) 1790 if (HAS_EXTENDED_IDS(ha))
1820 mcp->mb[1] = fcport->loop_id; 1791 mcp->mb[1] = fcport->loop_id;
@@ -1845,15 +1816,12 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1845 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 1816 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
1846 rval = QLA_SUCCESS; 1817 rval = QLA_SUCCESS;
1847 1818
1848 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1819 ql_dbg(ql_dbg_mbx, vha, 0x106b,
1849 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval, 1820 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
1850 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1821 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1851 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1852 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1853 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1854 } else { 1822 } else {
1855 /*EMPTY*/ 1823 /*EMPTY*/
1856 DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1824 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
1857 } 1825 }
1858 1826
1859 return (rval); 1827 return (rval);
@@ -1870,12 +1838,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1870 struct req_que *req; 1838 struct req_que *req;
1871 struct rsp_que *rsp; 1839 struct rsp_que *rsp;
1872 1840
1873 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1841 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
1874 1842
1875 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1843 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1876 if (lg == NULL) { 1844 if (lg == NULL) {
1877 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n", 1845 ql_log(ql_log_warn, vha, 0x106e,
1878 __func__, vha->host_no)); 1846 "Failed to allocate logout IOCB.\n");
1879 return QLA_MEMORY_ALLOC_FAILED; 1847 return QLA_MEMORY_ALLOC_FAILED;
1880 } 1848 }
1881 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1849 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1899,22 +1867,22 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1899 1867
1900 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1868 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1901 if (rval != QLA_SUCCESS) { 1869 if (rval != QLA_SUCCESS) {
1902 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1870 ql_dbg(ql_dbg_mbx, vha, 0x106f,
1903 "(%x).\n", __func__, vha->host_no, rval)); 1871 "Failed to issue logout IOCB (%x).\n", rval);
1904 } else if (lg->entry_status != 0) { 1872 } else if (lg->entry_status != 0) {
1905 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1873 ql_dbg(ql_dbg_mbx, vha, 0x1070,
1906 "-- error status (%x).\n", __func__, vha->host_no, 1874 "Failed to complete IOCB -- error status (%x).\n",
1907 lg->entry_status)); 1875 lg->entry_status);
1908 rval = QLA_FUNCTION_FAILED; 1876 rval = QLA_FUNCTION_FAILED;
1909 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1877 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1910 DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB " 1878 ql_dbg(ql_dbg_mbx, vha, 0x1071,
1911 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1879 "Failed to complete IOCB -- completion status (%x) "
1912 vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status), 1880 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1913 le32_to_cpu(lg->io_parameter[0]), 1881 le32_to_cpu(lg->io_parameter[0]),
1914 le32_to_cpu(lg->io_parameter[1]))); 1882 le32_to_cpu(lg->io_parameter[1]));
1915 } else { 1883 } else {
1916 /*EMPTY*/ 1884 /*EMPTY*/
1917 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1885 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
1918 } 1886 }
1919 1887
1920 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1888 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1946,8 +1914,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1946 mbx_cmd_t mc; 1914 mbx_cmd_t mc;
1947 mbx_cmd_t *mcp = &mc; 1915 mbx_cmd_t *mcp = &mc;
1948 1916
1949 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", 1917 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
1950 vha->host_no));
1951 1918
1952 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 1919 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1953 mcp->out_mb = MBX_1|MBX_0; 1920 mcp->out_mb = MBX_1|MBX_0;
@@ -1966,12 +1933,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1966 1933
1967 if (rval != QLA_SUCCESS) { 1934 if (rval != QLA_SUCCESS) {
1968 /*EMPTY*/ 1935 /*EMPTY*/
1969 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " 1936 ql_dbg(ql_dbg_mbx, vha, 0x1074,
1970 "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1])); 1937 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
1971 } else { 1938 } else {
1972 /*EMPTY*/ 1939 /*EMPTY*/
1973 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", 1940 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
1974 vha->host_no));
1975 } 1941 }
1976 1942
1977 return rval; 1943 return rval;
@@ -1999,8 +1965,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1999 mbx_cmd_t mc; 1965 mbx_cmd_t mc;
2000 mbx_cmd_t *mcp = &mc; 1966 mbx_cmd_t *mcp = &mc;
2001 1967
2002 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1968 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
2003 vha->host_no));
2004 1969
2005 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1970 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2006 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; 1971 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2014,12 +1979,10 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2014 1979
2015 if (rval != QLA_SUCCESS) { 1980 if (rval != QLA_SUCCESS) {
2016 /*EMPTY*/ 1981 /*EMPTY*/
2017 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", 1982 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2018 vha->host_no, rval));
2019 } else { 1983 } else {
2020 /*EMPTY*/ 1984 /*EMPTY*/
2021 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", 1985 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
2022 vha->host_no));
2023 } 1986 }
2024 1987
2025 return rval; 1988 return rval;
@@ -2045,8 +2008,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2045 mbx_cmd_t mc; 2008 mbx_cmd_t mc;
2046 mbx_cmd_t *mcp = &mc; 2009 mbx_cmd_t *mcp = &mc;
2047 2010
2048 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", 2011 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
2049 vha->host_no));
2050 2012
2051 if (id_list == NULL) 2013 if (id_list == NULL)
2052 return QLA_FUNCTION_FAILED; 2014 return QLA_FUNCTION_FAILED;
@@ -2075,12 +2037,10 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2075 2037
2076 if (rval != QLA_SUCCESS) { 2038 if (rval != QLA_SUCCESS) {
2077 /*EMPTY*/ 2039 /*EMPTY*/
2078 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", 2040 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2079 vha->host_no, rval));
2080 } else { 2041 } else {
2081 *entries = mcp->mb[1]; 2042 *entries = mcp->mb[1];
2082 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", 2043 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
2083 vha->host_no));
2084 } 2044 }
2085 2045
2086 return rval; 2046 return rval;
@@ -2108,7 +2068,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2108 mbx_cmd_t mc; 2068 mbx_cmd_t mc;
2109 mbx_cmd_t *mcp = &mc; 2069 mbx_cmd_t *mcp = &mc;
2110 2070
2111 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2071 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
2112 2072
2113 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2073 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2114 mcp->out_mb = MBX_0; 2074 mcp->out_mb = MBX_0;
@@ -2121,14 +2081,14 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2121 2081
2122 if (rval != QLA_SUCCESS) { 2082 if (rval != QLA_SUCCESS) {
2123 /*EMPTY*/ 2083 /*EMPTY*/
2124 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__, 2084 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2125 vha->host_no, mcp->mb[0])); 2085 "Failed mb[0]=%x.\n", mcp->mb[0]);
2126 } else { 2086 } else {
2127 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " 2087 ql_dbg(ql_dbg_mbx, vha, 0x107e,
2128 "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__, 2088 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2129 vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3], 2089 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2130 mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], 2090 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2131 mcp->mb[12])); 2091 mcp->mb[11], mcp->mb[12]);
2132 2092
2133 if (cur_xchg_cnt) 2093 if (cur_xchg_cnt)
2134 *cur_xchg_cnt = mcp->mb[3]; 2094 *cur_xchg_cnt = mcp->mb[3];
@@ -2147,7 +2107,6 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2147 return (rval); 2107 return (rval);
2148} 2108}
2149 2109
2150#if defined(QL_DEBUG_LEVEL_3)
2151/* 2110/*
2152 * qla2x00_get_fcal_position_map 2111 * qla2x00_get_fcal_position_map
2153 * Get FCAL (LILP) position map using mailbox command 2112 * Get FCAL (LILP) position map using mailbox command
@@ -2172,10 +2131,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2172 dma_addr_t pmap_dma; 2131 dma_addr_t pmap_dma;
2173 struct qla_hw_data *ha = vha->hw; 2132 struct qla_hw_data *ha = vha->hw;
2174 2133
2134 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
2135
2175 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2136 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2176 if (pmap == NULL) { 2137 if (pmap == NULL) {
2177 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2138 ql_log(ql_log_warn, vha, 0x1080,
2178 __func__, vha->host_no)); 2139 "Memory alloc failed.\n");
2179 return QLA_MEMORY_ALLOC_FAILED; 2140 return QLA_MEMORY_ALLOC_FAILED;
2180 } 2141 }
2181 memset(pmap, 0, FCAL_MAP_SIZE); 2142 memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2193,10 +2154,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2193 rval = qla2x00_mailbox_command(vha, mcp); 2154 rval = qla2x00_mailbox_command(vha, mcp);
2194 2155
2195 if (rval == QLA_SUCCESS) { 2156 if (rval == QLA_SUCCESS) {
2196 DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map " 2157 ql_dbg(ql_dbg_mbx, vha, 0x1081,
2197 "size (%x)\n", __func__, vha->host_no, mcp->mb[0], 2158 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2198 mcp->mb[1], (unsigned)pmap[0])); 2159 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2199 DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1)); 2160 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2161 pmap, pmap[0] + 1);
2200 2162
2201 if (pos_map) 2163 if (pos_map)
2202 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 2164 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
@@ -2204,15 +2166,13 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2204 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 2166 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2205 2167
2206 if (rval != QLA_SUCCESS) { 2168 if (rval != QLA_SUCCESS) {
2207 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2169 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2208 vha->host_no, rval));
2209 } else { 2170 } else {
2210 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2171 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
2211 } 2172 }
2212 2173
2213 return rval; 2174 return rval;
2214} 2175}
2215#endif
2216 2176
2217/* 2177/*
2218 * qla2x00_get_link_status 2178 * qla2x00_get_link_status
@@ -2237,7 +2197,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2237 uint32_t *siter, *diter, dwords; 2197 uint32_t *siter, *diter, dwords;
2238 struct qla_hw_data *ha = vha->hw; 2198 struct qla_hw_data *ha = vha->hw;
2239 2199
2240 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2200 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
2241 2201
2242 mcp->mb[0] = MBC_GET_LINK_STATUS; 2202 mcp->mb[0] = MBC_GET_LINK_STATUS;
2243 mcp->mb[2] = MSW(stats_dma); 2203 mcp->mb[2] = MSW(stats_dma);
@@ -2266,11 +2226,12 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2266 2226
2267 if (rval == QLA_SUCCESS) { 2227 if (rval == QLA_SUCCESS) {
2268 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2228 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2269 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2229 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2270 __func__, vha->host_no, mcp->mb[0])); 2230 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2271 rval = QLA_FUNCTION_FAILED; 2231 rval = QLA_FUNCTION_FAILED;
2272 } else { 2232 } else {
2273 /* Copy over data -- firmware data is LE. */ 2233 /* Copy over data -- firmware data is LE. */
2234 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
2274 dwords = offsetof(struct link_statistics, unused1) / 4; 2235 dwords = offsetof(struct link_statistics, unused1) / 4;
2275 siter = diter = &stats->link_fail_cnt; 2236 siter = diter = &stats->link_fail_cnt;
2276 while (dwords--) 2237 while (dwords--)
@@ -2278,8 +2239,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2278 } 2239 }
2279 } else { 2240 } else {
2280 /* Failed. */ 2241 /* Failed. */
2281 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2242 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2282 vha->host_no, rval));
2283 } 2243 }
2284 2244
2285 return rval; 2245 return rval;
@@ -2294,7 +2254,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2294 mbx_cmd_t *mcp = &mc; 2254 mbx_cmd_t *mcp = &mc;
2295 uint32_t *siter, *diter, dwords; 2255 uint32_t *siter, *diter, dwords;
2296 2256
2297 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2257 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
2298 2258
2299 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2259 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2300 mcp->mb[2] = MSW(stats_dma); 2260 mcp->mb[2] = MSW(stats_dma);
@@ -2312,10 +2272,11 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2312 2272
2313 if (rval == QLA_SUCCESS) { 2273 if (rval == QLA_SUCCESS) {
2314 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2274 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2315 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2275 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2316 __func__, vha->host_no, mcp->mb[0])); 2276 "Failed mb[0]=%x.\n", mcp->mb[0]);
2317 rval = QLA_FUNCTION_FAILED; 2277 rval = QLA_FUNCTION_FAILED;
2318 } else { 2278 } else {
2279 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
2319 /* Copy over data -- firmware data is LE. */ 2280 /* Copy over data -- firmware data is LE. */
2320 dwords = sizeof(struct link_statistics) / 4; 2281 dwords = sizeof(struct link_statistics) / 4;
2321 siter = diter = &stats->link_fail_cnt; 2282 siter = diter = &stats->link_fail_cnt;
@@ -2324,8 +2285,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2324 } 2285 }
2325 } else { 2286 } else {
2326 /* Failed. */ 2287 /* Failed. */
2327 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2288 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2328 vha->host_no, rval));
2329 } 2289 }
2330 2290
2331 return rval; 2291 return rval;
@@ -2345,7 +2305,7 @@ qla24xx_abort_command(srb_t *sp)
2345 struct qla_hw_data *ha = vha->hw; 2305 struct qla_hw_data *ha = vha->hw;
2346 struct req_que *req = vha->req; 2306 struct req_que *req = vha->req;
2347 2307
2348 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2308 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
2349 2309
2350 spin_lock_irqsave(&ha->hardware_lock, flags); 2310 spin_lock_irqsave(&ha->hardware_lock, flags);
2351 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2311 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2360,8 +2320,8 @@ qla24xx_abort_command(srb_t *sp)
2360 2320
2361 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 2321 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2362 if (abt == NULL) { 2322 if (abt == NULL) {
2363 DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n", 2323 ql_log(ql_log_warn, vha, 0x108d,
2364 __func__, vha->host_no)); 2324 "Failed to allocate abort IOCB.\n");
2365 return QLA_MEMORY_ALLOC_FAILED; 2325 return QLA_MEMORY_ALLOC_FAILED;
2366 } 2326 }
2367 memset(abt, 0, sizeof(struct abort_entry_24xx)); 2327 memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2380,20 +2340,20 @@ qla24xx_abort_command(srb_t *sp)
2380 2340
2381 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 2341 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2382 if (rval != QLA_SUCCESS) { 2342 if (rval != QLA_SUCCESS) {
2383 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", 2343 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2384 __func__, vha->host_no, rval)); 2344 "Failed to issue IOCB (%x).\n", rval);
2385 } else if (abt->entry_status != 0) { 2345 } else if (abt->entry_status != 0) {
2386 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2346 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2387 "-- error status (%x).\n", __func__, vha->host_no, 2347 "Failed to complete IOCB -- error status (%x).\n",
2388 abt->entry_status)); 2348 abt->entry_status);
2389 rval = QLA_FUNCTION_FAILED; 2349 rval = QLA_FUNCTION_FAILED;
2390 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { 2350 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2391 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2351 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2392 "-- completion status (%x).\n", __func__, vha->host_no, 2352 "Failed to complete IOCB -- completion status (%x).\n",
2393 le16_to_cpu(abt->nport_handle))); 2353 le16_to_cpu(abt->nport_handle));
2394 rval = QLA_FUNCTION_FAILED; 2354 rval = QLA_FUNCTION_FAILED;
2395 } else { 2355 } else {
2396 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2356 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
2397 } 2357 }
2398 2358
2399 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2359 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2421,19 +2381,20 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2421 struct req_que *req; 2381 struct req_que *req;
2422 struct rsp_que *rsp; 2382 struct rsp_que *rsp;
2423 2383
2424 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
2425
2426 vha = fcport->vha; 2384 vha = fcport->vha;
2427 ha = vha->hw; 2385 ha = vha->hw;
2428 req = vha->req; 2386 req = vha->req;
2387
2388 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
2389
2429 if (ha->flags.cpu_affinity_enabled) 2390 if (ha->flags.cpu_affinity_enabled)
2430 rsp = ha->rsp_q_map[tag + 1]; 2391 rsp = ha->rsp_q_map[tag + 1];
2431 else 2392 else
2432 rsp = req->rsp; 2393 rsp = req->rsp;
2433 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2394 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2434 if (tsk == NULL) { 2395 if (tsk == NULL) {
2435 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2396 ql_log(ql_log_warn, vha, 0x1093,
2436 "IOCB.\n", __func__, vha->host_no)); 2397 "Failed to allocate task management IOCB.\n");
2437 return QLA_MEMORY_ALLOC_FAILED; 2398 return QLA_MEMORY_ALLOC_FAILED;
2438 } 2399 }
2439 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd)); 2400 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2457,30 +2418,30 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2457 sts = &tsk->p.sts; 2418 sts = &tsk->p.sts;
2458 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 2419 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2459 if (rval != QLA_SUCCESS) { 2420 if (rval != QLA_SUCCESS) {
2460 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " 2421 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2461 "(%x).\n", __func__, vha->host_no, name, rval)); 2422 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2462 } else if (sts->entry_status != 0) { 2423 } else if (sts->entry_status != 0) {
2463 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2424 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2464 "-- error status (%x).\n", __func__, vha->host_no, 2425 "Failed to complete IOCB -- error status (%x).\n",
2465 sts->entry_status)); 2426 sts->entry_status);
2466 rval = QLA_FUNCTION_FAILED; 2427 rval = QLA_FUNCTION_FAILED;
2467 } else if (sts->comp_status != 2428 } else if (sts->comp_status !=
2468 __constant_cpu_to_le16(CS_COMPLETE)) { 2429 __constant_cpu_to_le16(CS_COMPLETE)) {
2469 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2430 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2470 "-- completion status (%x).\n", __func__, 2431 "Failed to complete IOCB -- completion status (%x).\n",
2471 vha->host_no, le16_to_cpu(sts->comp_status))); 2432 le16_to_cpu(sts->comp_status));
2472 rval = QLA_FUNCTION_FAILED; 2433 rval = QLA_FUNCTION_FAILED;
2473 } else if (le16_to_cpu(sts->scsi_status) & 2434 } else if (le16_to_cpu(sts->scsi_status) &
2474 SS_RESPONSE_INFO_LEN_VALID) { 2435 SS_RESPONSE_INFO_LEN_VALID) {
2475 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2436 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2476 DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent " 2437 ql_dbg(ql_dbg_mbx, vha, 0x1097,
2477 "data length -- not enough response info (%d).\n", 2438 "Ignoring inconsistent data length -- not enough "
2478 __func__, vha->host_no, 2439 "response info (%d).\n",
2479 le32_to_cpu(sts->rsp_data_len))); 2440 le32_to_cpu(sts->rsp_data_len));
2480 } else if (sts->data[3]) { 2441 } else if (sts->data[3]) {
2481 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2442 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2482 "-- response (%x).\n", __func__, 2443 "Failed to complete IOCB -- response (%x).\n",
2483 vha->host_no, sts->data[3])); 2444 sts->data[3]);
2484 rval = QLA_FUNCTION_FAILED; 2445 rval = QLA_FUNCTION_FAILED;
2485 } 2446 }
2486 } 2447 }
@@ -2489,10 +2450,10 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2489 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 2450 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2490 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); 2451 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2491 if (rval2 != QLA_SUCCESS) { 2452 if (rval2 != QLA_SUCCESS) {
2492 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 2453 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2493 "(%x).\n", __func__, vha->host_no, rval2)); 2454 "Failed to issue marker IOCB (%x).\n", rval2);
2494 } else { 2455 } else {
2495 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2456 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
2496 } 2457 }
2497 2458
2498 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 2459 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2533,7 +2494,7 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2533 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 2494 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2534 return QLA_FUNCTION_FAILED; 2495 return QLA_FUNCTION_FAILED;
2535 2496
2536 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2497 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
2537 2498
2538 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 2499 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2539 mcp->out_mb = MBX_0; 2500 mcp->out_mb = MBX_0;
@@ -2543,10 +2504,9 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2543 rval = qla2x00_mailbox_command(vha, mcp); 2504 rval = qla2x00_mailbox_command(vha, mcp);
2544 2505
2545 if (rval != QLA_SUCCESS) { 2506 if (rval != QLA_SUCCESS) {
2546 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2507 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2547 vha->host_no, rval));
2548 } else { 2508 } else {
2549 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2509 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
2550 } 2510 }
2551 2511
2552 return rval; 2512 return rval;
@@ -2566,7 +2526,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2566 mbx_cmd_t mc; 2526 mbx_cmd_t mc;
2567 mbx_cmd_t *mcp = &mc; 2527 mbx_cmd_t *mcp = &mc;
2568 2528
2569 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2529 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
2570 2530
2571 mcp->mb[0] = MBC_SERDES_PARAMS; 2531 mcp->mb[0] = MBC_SERDES_PARAMS;
2572 mcp->mb[1] = BIT_0; 2532 mcp->mb[1] = BIT_0;
@@ -2581,11 +2541,11 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2581 2541
2582 if (rval != QLA_SUCCESS) { 2542 if (rval != QLA_SUCCESS) {
2583 /*EMPTY*/ 2543 /*EMPTY*/
2584 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 2544 ql_dbg(ql_dbg_mbx, vha, 0x109f,
2585 vha->host_no, rval, mcp->mb[0])); 2545 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2586 } else { 2546 } else {
2587 /*EMPTY*/ 2547 /*EMPTY*/
2588 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2548 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
2589 } 2549 }
2590 2550
2591 return rval; 2551 return rval;
@@ -2601,7 +2561,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2601 if (!IS_FWI2_CAPABLE(vha->hw)) 2561 if (!IS_FWI2_CAPABLE(vha->hw))
2602 return QLA_FUNCTION_FAILED; 2562 return QLA_FUNCTION_FAILED;
2603 2563
2604 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2564 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
2605 2565
2606 mcp->mb[0] = MBC_STOP_FIRMWARE; 2566 mcp->mb[0] = MBC_STOP_FIRMWARE;
2607 mcp->out_mb = MBX_0; 2567 mcp->out_mb = MBX_0;
@@ -2611,12 +2571,11 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2611 rval = qla2x00_mailbox_command(vha, mcp); 2571 rval = qla2x00_mailbox_command(vha, mcp);
2612 2572
2613 if (rval != QLA_SUCCESS) { 2573 if (rval != QLA_SUCCESS) {
2614 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2574 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
2615 vha->host_no, rval));
2616 if (mcp->mb[0] == MBS_INVALID_COMMAND) 2575 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2617 rval = QLA_INVALID_COMMAND; 2576 rval = QLA_INVALID_COMMAND;
2618 } else { 2577 } else {
2619 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2578 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
2620 } 2579 }
2621 2580
2622 return rval; 2581 return rval;
@@ -2630,14 +2589,14 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2630 mbx_cmd_t mc; 2589 mbx_cmd_t mc;
2631 mbx_cmd_t *mcp = &mc; 2590 mbx_cmd_t *mcp = &mc;
2632 2591
2592 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
2593
2633 if (!IS_FWI2_CAPABLE(vha->hw)) 2594 if (!IS_FWI2_CAPABLE(vha->hw))
2634 return QLA_FUNCTION_FAILED; 2595 return QLA_FUNCTION_FAILED;
2635 2596
2636 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2597 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2637 return QLA_FUNCTION_FAILED; 2598 return QLA_FUNCTION_FAILED;
2638 2599
2639 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2640
2641 mcp->mb[0] = MBC_TRACE_CONTROL; 2600 mcp->mb[0] = MBC_TRACE_CONTROL;
2642 mcp->mb[1] = TC_EFT_ENABLE; 2601 mcp->mb[1] = TC_EFT_ENABLE;
2643 mcp->mb[2] = LSW(eft_dma); 2602 mcp->mb[2] = LSW(eft_dma);
@@ -2652,10 +2611,11 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2652 mcp->flags = 0; 2611 mcp->flags = 0;
2653 rval = qla2x00_mailbox_command(vha, mcp); 2612 rval = qla2x00_mailbox_command(vha, mcp);
2654 if (rval != QLA_SUCCESS) { 2613 if (rval != QLA_SUCCESS) {
2655 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2614 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
2656 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2615 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2616 rval, mcp->mb[0], mcp->mb[1]);
2657 } else { 2617 } else {
2658 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2618 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
2659 } 2619 }
2660 2620
2661 return rval; 2621 return rval;
@@ -2668,14 +2628,14 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2668 mbx_cmd_t mc; 2628 mbx_cmd_t mc;
2669 mbx_cmd_t *mcp = &mc; 2629 mbx_cmd_t *mcp = &mc;
2670 2630
2631 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
2632
2671 if (!IS_FWI2_CAPABLE(vha->hw)) 2633 if (!IS_FWI2_CAPABLE(vha->hw))
2672 return QLA_FUNCTION_FAILED; 2634 return QLA_FUNCTION_FAILED;
2673 2635
2674 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2636 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2675 return QLA_FUNCTION_FAILED; 2637 return QLA_FUNCTION_FAILED;
2676 2638
2677 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2678
2679 mcp->mb[0] = MBC_TRACE_CONTROL; 2639 mcp->mb[0] = MBC_TRACE_CONTROL;
2680 mcp->mb[1] = TC_EFT_DISABLE; 2640 mcp->mb[1] = TC_EFT_DISABLE;
2681 mcp->out_mb = MBX_1|MBX_0; 2641 mcp->out_mb = MBX_1|MBX_0;
@@ -2684,10 +2644,11 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2684 mcp->flags = 0; 2644 mcp->flags = 0;
2685 rval = qla2x00_mailbox_command(vha, mcp); 2645 rval = qla2x00_mailbox_command(vha, mcp);
2686 if (rval != QLA_SUCCESS) { 2646 if (rval != QLA_SUCCESS) {
2687 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2647 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
2688 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2648 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2649 rval, mcp->mb[0], mcp->mb[1]);
2689 } else { 2650 } else {
2690 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2651 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
2691 } 2652 }
2692 2653
2693 return rval; 2654 return rval;
@@ -2701,14 +2662,14 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2701 mbx_cmd_t mc; 2662 mbx_cmd_t mc;
2702 mbx_cmd_t *mcp = &mc; 2663 mbx_cmd_t *mcp = &mc;
2703 2664
2665 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
2666
2704 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) 2667 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2705 return QLA_FUNCTION_FAILED; 2668 return QLA_FUNCTION_FAILED;
2706 2669
2707 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2670 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2708 return QLA_FUNCTION_FAILED; 2671 return QLA_FUNCTION_FAILED;
2709 2672
2710 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2711
2712 mcp->mb[0] = MBC_TRACE_CONTROL; 2673 mcp->mb[0] = MBC_TRACE_CONTROL;
2713 mcp->mb[1] = TC_FCE_ENABLE; 2674 mcp->mb[1] = TC_FCE_ENABLE;
2714 mcp->mb[2] = LSW(fce_dma); 2675 mcp->mb[2] = LSW(fce_dma);
@@ -2727,10 +2688,11 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2727 mcp->flags = 0; 2688 mcp->flags = 0;
2728 rval = qla2x00_mailbox_command(vha, mcp); 2689 rval = qla2x00_mailbox_command(vha, mcp);
2729 if (rval != QLA_SUCCESS) { 2690 if (rval != QLA_SUCCESS) {
2730 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2691 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
2731 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2692 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2693 rval, mcp->mb[0], mcp->mb[1]);
2732 } else { 2694 } else {
2733 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2695 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
2734 2696
2735 if (mb) 2697 if (mb)
2736 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 2698 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2748,14 +2710,14 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2748 mbx_cmd_t mc; 2710 mbx_cmd_t mc;
2749 mbx_cmd_t *mcp = &mc; 2711 mbx_cmd_t *mcp = &mc;
2750 2712
2713 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
2714
2751 if (!IS_FWI2_CAPABLE(vha->hw)) 2715 if (!IS_FWI2_CAPABLE(vha->hw))
2752 return QLA_FUNCTION_FAILED; 2716 return QLA_FUNCTION_FAILED;
2753 2717
2754 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2718 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2755 return QLA_FUNCTION_FAILED; 2719 return QLA_FUNCTION_FAILED;
2756 2720
2757 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2758
2759 mcp->mb[0] = MBC_TRACE_CONTROL; 2721 mcp->mb[0] = MBC_TRACE_CONTROL;
2760 mcp->mb[1] = TC_FCE_DISABLE; 2722 mcp->mb[1] = TC_FCE_DISABLE;
2761 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 2723 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
@@ -2766,10 +2728,11 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2766 mcp->flags = 0; 2728 mcp->flags = 0;
2767 rval = qla2x00_mailbox_command(vha, mcp); 2729 rval = qla2x00_mailbox_command(vha, mcp);
2768 if (rval != QLA_SUCCESS) { 2730 if (rval != QLA_SUCCESS) {
2769 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2731 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
2770 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2732 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2733 rval, mcp->mb[0], mcp->mb[1]);
2771 } else { 2734 } else {
2772 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2735 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
2773 2736
2774 if (wr) 2737 if (wr)
2775 *wr = (uint64_t) mcp->mb[5] << 48 | 2738 *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2794,11 +2757,11 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2794 mbx_cmd_t mc; 2757 mbx_cmd_t mc;
2795 mbx_cmd_t *mcp = &mc; 2758 mbx_cmd_t *mcp = &mc;
2796 2759
2760 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
2761
2797 if (!IS_IIDMA_CAPABLE(vha->hw)) 2762 if (!IS_IIDMA_CAPABLE(vha->hw))
2798 return QLA_FUNCTION_FAILED; 2763 return QLA_FUNCTION_FAILED;
2799 2764
2800 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2801
2802 mcp->mb[0] = MBC_PORT_PARAMS; 2765 mcp->mb[0] = MBC_PORT_PARAMS;
2803 mcp->mb[1] = loop_id; 2766 mcp->mb[1] = loop_id;
2804 mcp->mb[2] = mcp->mb[3] = 0; 2767 mcp->mb[2] = mcp->mb[3] = 0;
@@ -2817,10 +2780,9 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2817 } 2780 }
2818 2781
2819 if (rval != QLA_SUCCESS) { 2782 if (rval != QLA_SUCCESS) {
2820 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2783 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2821 vha->host_no, rval));
2822 } else { 2784 } else {
2823 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2785 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
2824 if (port_speed) 2786 if (port_speed)
2825 *port_speed = mcp->mb[3]; 2787 *port_speed = mcp->mb[3];
2826 } 2788 }
@@ -2836,11 +2798,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2836 mbx_cmd_t mc; 2798 mbx_cmd_t mc;
2837 mbx_cmd_t *mcp = &mc; 2799 mbx_cmd_t *mcp = &mc;
2838 2800
2801 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
2802
2839 if (!IS_IIDMA_CAPABLE(vha->hw)) 2803 if (!IS_IIDMA_CAPABLE(vha->hw))
2840 return QLA_FUNCTION_FAILED; 2804 return QLA_FUNCTION_FAILED;
2841 2805
2842 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2843
2844 mcp->mb[0] = MBC_PORT_PARAMS; 2806 mcp->mb[0] = MBC_PORT_PARAMS;
2845 mcp->mb[1] = loop_id; 2807 mcp->mb[1] = loop_id;
2846 mcp->mb[2] = BIT_0; 2808 mcp->mb[2] = BIT_0;
@@ -2863,10 +2825,9 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2863 } 2825 }
2864 2826
2865 if (rval != QLA_SUCCESS) { 2827 if (rval != QLA_SUCCESS) {
2866 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2828 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
2867 vha->host_no, rval));
2868 } else { 2829 } else {
2869 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2830 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
2870 } 2831 }
2871 2832
2872 return rval; 2833 return rval;
@@ -2882,33 +2843,36 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2882 scsi_qla_host_t *vp; 2843 scsi_qla_host_t *vp;
2883 unsigned long flags; 2844 unsigned long flags;
2884 2845
2846 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
2847
2885 if (rptid_entry->entry_status != 0) 2848 if (rptid_entry->entry_status != 0)
2886 return; 2849 return;
2887 2850
2888 if (rptid_entry->format == 0) { 2851 if (rptid_entry->format == 0) {
2889 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," 2852 ql_dbg(ql_dbg_mbx, vha, 0x10b7,
2890 " number of VPs acquired %d\n", __func__, vha->host_no, 2853 "Format 0 : Number of VPs setup %d, number of "
2891 MSB(le16_to_cpu(rptid_entry->vp_count)), 2854 "VPs acquired %d.\n",
2892 LSB(le16_to_cpu(rptid_entry->vp_count)))); 2855 MSB(le16_to_cpu(rptid_entry->vp_count)),
2893 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__, 2856 LSB(le16_to_cpu(rptid_entry->vp_count)));
2894 rptid_entry->port_id[2], rptid_entry->port_id[1], 2857 ql_dbg(ql_dbg_mbx, vha, 0x10b8,
2895 rptid_entry->port_id[0])); 2858 "Primary port id %02x%02x%02x.\n",
2859 rptid_entry->port_id[2], rptid_entry->port_id[1],
2860 rptid_entry->port_id[0]);
2896 } else if (rptid_entry->format == 1) { 2861 } else if (rptid_entry->format == 1) {
2897 vp_idx = LSB(stat); 2862 vp_idx = LSB(stat);
2898 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled " 2863 ql_dbg(ql_dbg_mbx, vha, 0x10b9,
2899 "- status %d - " 2864 "Format 1: VP[%d] enabled - status %d - with "
2900 "with port id %02x%02x%02x\n", __func__, vha->host_no, 2865 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2901 vp_idx, MSB(stat),
2902 rptid_entry->port_id[2], rptid_entry->port_id[1], 2866 rptid_entry->port_id[2], rptid_entry->port_id[1],
2903 rptid_entry->port_id[0])); 2867 rptid_entry->port_id[0]);
2904 2868
2905 vp = vha; 2869 vp = vha;
2906 if (vp_idx == 0 && (MSB(stat) != 1)) 2870 if (vp_idx == 0 && (MSB(stat) != 1))
2907 goto reg_needed; 2871 goto reg_needed;
2908 2872
2909 if (MSB(stat) == 1) { 2873 if (MSB(stat) == 1) {
2910 DEBUG2(printk("scsi(%ld): Could not acquire ID for " 2874 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
2911 "VP[%d].\n", vha->host_no, vp_idx)); 2875 "Could not acquire ID for VP[%d].\n", vp_idx);
2912 return; 2876 return;
2913 } 2877 }
2914 2878
@@ -2963,10 +2927,12 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2963 2927
2964 /* This can be called by the parent */ 2928 /* This can be called by the parent */
2965 2929
2930 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
2931
2966 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 2932 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2967 if (!vpmod) { 2933 if (!vpmod) {
2968 DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP " 2934 ql_log(ql_log_warn, vha, 0x10bc,
2969 "IOCB.\n", __func__, vha->host_no)); 2935 "Failed to allocate modify VP IOCB.\n");
2970 return QLA_MEMORY_ALLOC_FAILED; 2936 return QLA_MEMORY_ALLOC_FAILED;
2971 } 2937 }
2972 2938
@@ -2983,22 +2949,21 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2983 2949
2984 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 2950 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
2985 if (rval != QLA_SUCCESS) { 2951 if (rval != QLA_SUCCESS) {
2986 DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB" 2952 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
2987 "(%x).\n", __func__, base_vha->host_no, rval)); 2953 "Failed to issue VP config IOCB (%x).\n", rval);
2988 } else if (vpmod->comp_status != 0) { 2954 } else if (vpmod->comp_status != 0) {
2989 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2955 ql_dbg(ql_dbg_mbx, vha, 0x10be,
2990 "-- error status (%x).\n", __func__, base_vha->host_no, 2956 "Failed to complete IOCB -- error status (%x).\n",
2991 vpmod->comp_status)); 2957 vpmod->comp_status);
2992 rval = QLA_FUNCTION_FAILED; 2958 rval = QLA_FUNCTION_FAILED;
2993 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 2959 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2994 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2960 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
2995 "-- completion status (%x).\n", __func__, base_vha->host_no, 2961 "Failed to complete IOCB -- completion status (%x).\n",
2996 le16_to_cpu(vpmod->comp_status))); 2962 le16_to_cpu(vpmod->comp_status));
2997 rval = QLA_FUNCTION_FAILED; 2963 rval = QLA_FUNCTION_FAILED;
2998 } else { 2964 } else {
2999 /* EMPTY */ 2965 /* EMPTY */
3000 DEBUG11(printk("%s(%ld): done.\n", __func__, 2966 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
3001 base_vha->host_no));
3002 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 2967 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3003 } 2968 }
3004 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 2969 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3032,17 +2997,16 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3032 int vp_index = vha->vp_idx; 2997 int vp_index = vha->vp_idx;
3033 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2998 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3034 2999
3035 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__, 3000 ql_dbg(ql_dbg_mbx, vha, 0x10c1,
3036 vha->host_no, vp_index)); 3001 "Entered %s enabling index %d.\n", __func__, vp_index);
3037 3002
3038 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 3003 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3039 return QLA_PARAMETER_ERROR; 3004 return QLA_PARAMETER_ERROR;
3040 3005
3041 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); 3006 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3042 if (!vce) { 3007 if (!vce) {
3043 DEBUG2_3(printk("%s(%ld): " 3008 ql_log(ql_log_warn, vha, 0x10c2,
3044 "failed to allocate VP Control IOCB.\n", __func__, 3009 "Failed to allocate VP control IOCB.\n");
3045 base_vha->host_no));
3046 return QLA_MEMORY_ALLOC_FAILED; 3010 return QLA_MEMORY_ALLOC_FAILED;
3047 } 3011 }
3048 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); 3012 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -3063,28 +3027,20 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3063 3027
3064 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0); 3028 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3065 if (rval != QLA_SUCCESS) { 3029 if (rval != QLA_SUCCESS) {
3066 DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB" 3030 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3067 "(%x).\n", __func__, base_vha->host_no, rval)); 3031 "Failed to issue VP control IOCB (%x).\n", rval);
3068 printk("%s(%ld): failed to issue VP control IOCB"
3069 "(%x).\n", __func__, base_vha->host_no, rval);
3070 } else if (vce->entry_status != 0) { 3032 } else if (vce->entry_status != 0) {
3071 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 3033 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3072 "-- error status (%x).\n", __func__, base_vha->host_no, 3034 "Failed to complete IOCB -- error status (%x).\n",
3073 vce->entry_status));
3074 printk("%s(%ld): failed to complete IOCB "
3075 "-- error status (%x).\n", __func__, base_vha->host_no,
3076 vce->entry_status); 3035 vce->entry_status);
3077 rval = QLA_FUNCTION_FAILED; 3036 rval = QLA_FUNCTION_FAILED;
3078 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 3037 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3079 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 3038 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3080 "-- completion status (%x).\n", __func__, base_vha->host_no, 3039 "Failed to complet IOCB -- completion status (%x).\n",
3081 le16_to_cpu(vce->comp_status)));
3082 printk("%s(%ld): failed to complete IOCB "
3083 "-- completion status (%x).\n", __func__, base_vha->host_no,
3084 le16_to_cpu(vce->comp_status)); 3040 le16_to_cpu(vce->comp_status));
3085 rval = QLA_FUNCTION_FAILED; 3041 rval = QLA_FUNCTION_FAILED;
3086 } else { 3042 } else {
3087 DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no)); 3043 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
3088 } 3044 }
3089 3045
3090 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 3046 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3121,6 +3077,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3121 mbx_cmd_t mc; 3077 mbx_cmd_t mc;
3122 mbx_cmd_t *mcp = &mc; 3078 mbx_cmd_t *mcp = &mc;
3123 3079
3080 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
3081
3124 /* 3082 /*
3125 * This command is implicitly executed by firmware during login for the 3083 * This command is implicitly executed by firmware during login for the
3126 * physical hosts 3084 * physical hosts
@@ -3155,7 +3113,7 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3155 mbx_cmd_t mc; 3113 mbx_cmd_t mc;
3156 mbx_cmd_t *mcp = &mc; 3114 mbx_cmd_t *mcp = &mc;
3157 3115
3158 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3116 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
3159 3117
3160 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 3118 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3161 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 3119 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3186,10 +3144,10 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3186 rval = qla2x00_mailbox_command(vha, mcp); 3144 rval = qla2x00_mailbox_command(vha, mcp);
3187 3145
3188 if (rval != QLA_SUCCESS) { 3146 if (rval != QLA_SUCCESS) {
3189 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3147 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3190 vha->host_no, rval, mcp->mb[0])); 3148 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3191 } else { 3149 } else {
3192 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3150 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
3193 } 3151 }
3194 3152
3195 return rval; 3153 return rval;
@@ -3214,12 +3172,10 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3214 unsigned long flags; 3172 unsigned long flags;
3215 struct qla_hw_data *ha = vha->hw; 3173 struct qla_hw_data *ha = vha->hw;
3216 3174
3217 DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3175 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
3218 3176
3219 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 3177 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3220 if (mn == NULL) { 3178 if (mn == NULL) {
3221 DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
3222 "IOCB.\n", __func__, vha->host_no));
3223 return QLA_MEMORY_ALLOC_FAILED; 3179 return QLA_MEMORY_ALLOC_FAILED;
3224 } 3180 }
3225 3181
@@ -3237,43 +3193,43 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3237 mn->p.req.entry_count = 1; 3193 mn->p.req.entry_count = 1;
3238 mn->p.req.options = cpu_to_le16(options); 3194 mn->p.req.options = cpu_to_le16(options);
3239 3195
3240 DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__, 3196 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3241 vha->host_no)); 3197 "Dump of Verify Request.\n");
3242 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3198 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3243 sizeof(*mn))); 3199 (uint8_t *)mn, sizeof(*mn));
3244 3200
3245 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 3201 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3246 if (rval != QLA_SUCCESS) { 3202 if (rval != QLA_SUCCESS) {
3247 DEBUG2_16(printk("%s(%ld): failed to issue Verify " 3203 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3248 "IOCB (%x).\n", __func__, vha->host_no, rval)); 3204 "Failed to issue verify IOCB (%x).\n", rval);
3249 goto verify_done; 3205 goto verify_done;
3250 } 3206 }
3251 3207
3252 DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__, 3208 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3253 vha->host_no)); 3209 "Dump of Verify Response.\n");
3254 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3210 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3255 sizeof(*mn))); 3211 (uint8_t *)mn, sizeof(*mn));
3256 3212
3257 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 3213 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3258 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 3214 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3259 le16_to_cpu(mn->p.rsp.failure_code) : 0; 3215 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3260 DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__, 3216 ql_dbg(ql_dbg_mbx, vha, 0x10ce,
3261 vha->host_no, status[0], status[1])); 3217 "cs=%x fc=%x.\n", status[0], status[1]);
3262 3218
3263 if (status[0] != CS_COMPLETE) { 3219 if (status[0] != CS_COMPLETE) {
3264 rval = QLA_FUNCTION_FAILED; 3220 rval = QLA_FUNCTION_FAILED;
3265 if (!(options & VCO_DONT_UPDATE_FW)) { 3221 if (!(options & VCO_DONT_UPDATE_FW)) {
3266 DEBUG2_16(printk("%s(%ld): Firmware update " 3222 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3267 "failed. Retrying without update " 3223 "Firmware update failed. Retrying "
3268 "firmware.\n", __func__, vha->host_no)); 3224 "without update firmware.\n");
3269 options |= VCO_DONT_UPDATE_FW; 3225 options |= VCO_DONT_UPDATE_FW;
3270 options &= ~VCO_FORCE_UPDATE; 3226 options &= ~VCO_FORCE_UPDATE;
3271 retry = 1; 3227 retry = 1;
3272 } 3228 }
3273 } else { 3229 } else {
3274 DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n", 3230 ql_dbg(ql_dbg_mbx, vha, 0x10d0,
3275 __func__, vha->host_no, 3231 "Firmware updated to %x.\n",
3276 le32_to_cpu(mn->p.rsp.fw_ver))); 3232 le32_to_cpu(mn->p.rsp.fw_ver));
3277 3233
3278 /* NOTE: we only update OP firmware. */ 3234 /* NOTE: we only update OP firmware. */
3279 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 3235 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
@@ -3288,10 +3244,9 @@ verify_done:
3288 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 3244 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3289 3245
3290 if (rval != QLA_SUCCESS) { 3246 if (rval != QLA_SUCCESS) {
3291 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__, 3247 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
3292 vha->host_no, rval));
3293 } else { 3248 } else {
3294 DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3249 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
3295 } 3250 }
3296 3251
3297 return rval; 3252 return rval;
@@ -3307,6 +3262,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3307 struct device_reg_25xxmq __iomem *reg; 3262 struct device_reg_25xxmq __iomem *reg;
3308 struct qla_hw_data *ha = vha->hw; 3263 struct qla_hw_data *ha = vha->hw;
3309 3264
3265 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
3266
3310 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3267 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3311 mcp->mb[1] = req->options; 3268 mcp->mb[1] = req->options;
3312 mcp->mb[2] = MSW(LSD(req->dma)); 3269 mcp->mb[2] = MSW(LSD(req->dma));
@@ -3344,9 +3301,13 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3344 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3345 3302
3346 rval = qla2x00_mailbox_command(vha, mcp); 3303 rval = qla2x00_mailbox_command(vha, mcp);
3347 if (rval != QLA_SUCCESS) 3304 if (rval != QLA_SUCCESS) {
3348 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n", 3305 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3349 __func__, vha->host_no, rval, mcp->mb[0])); 3306 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3307 } else {
3308 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
3309 }
3310
3350 return rval; 3311 return rval;
3351} 3312}
3352 3313
@@ -3360,6 +3321,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3360 struct device_reg_25xxmq __iomem *reg; 3321 struct device_reg_25xxmq __iomem *reg;
3361 struct qla_hw_data *ha = vha->hw; 3322 struct qla_hw_data *ha = vha->hw;
3362 3323
3324 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
3325
3363 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3326 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3364 mcp->mb[1] = rsp->options; 3327 mcp->mb[1] = rsp->options;
3365 mcp->mb[2] = MSW(LSD(rsp->dma)); 3328 mcp->mb[2] = MSW(LSD(rsp->dma));
@@ -3393,10 +3356,13 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3393 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3356 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3394 3357
3395 rval = qla2x00_mailbox_command(vha, mcp); 3358 rval = qla2x00_mailbox_command(vha, mcp);
3396 if (rval != QLA_SUCCESS) 3359 if (rval != QLA_SUCCESS) {
3397 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x " 3360 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3398 "mb0=%x.\n", __func__, 3361 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3399 vha->host_no, rval, mcp->mb[0])); 3362 } else {
3363 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
3364 }
3365
3400 return rval; 3366 return rval;
3401} 3367}
3402 3368
@@ -3407,7 +3373,7 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3407 mbx_cmd_t mc; 3373 mbx_cmd_t mc;
3408 mbx_cmd_t *mcp = &mc; 3374 mbx_cmd_t *mcp = &mc;
3409 3375
3410 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3376 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
3411 3377
3412 mcp->mb[0] = MBC_IDC_ACK; 3378 mcp->mb[0] = MBC_IDC_ACK;
3413 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 3379 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3418,10 +3384,10 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3418 rval = qla2x00_mailbox_command(vha, mcp); 3384 rval = qla2x00_mailbox_command(vha, mcp);
3419 3385
3420 if (rval != QLA_SUCCESS) { 3386 if (rval != QLA_SUCCESS) {
3421 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3387 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3422 vha->host_no, rval, mcp->mb[0])); 3388 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3423 } else { 3389 } else {
3424 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3390 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
3425 } 3391 }
3426 3392
3427 return rval; 3393 return rval;
@@ -3434,11 +3400,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3434 mbx_cmd_t mc; 3400 mbx_cmd_t mc;
3435 mbx_cmd_t *mcp = &mc; 3401 mbx_cmd_t *mcp = &mc;
3436 3402
3403 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
3404
3437 if (!IS_QLA81XX(vha->hw)) 3405 if (!IS_QLA81XX(vha->hw))
3438 return QLA_FUNCTION_FAILED; 3406 return QLA_FUNCTION_FAILED;
3439 3407
3440 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3441
3442 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3408 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3443 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 3409 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3444 mcp->out_mb = MBX_1|MBX_0; 3410 mcp->out_mb = MBX_1|MBX_0;
@@ -3448,10 +3414,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3448 rval = qla2x00_mailbox_command(vha, mcp); 3414 rval = qla2x00_mailbox_command(vha, mcp);
3449 3415
3450 if (rval != QLA_SUCCESS) { 3416 if (rval != QLA_SUCCESS) {
3451 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 3417 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3452 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3418 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3419 rval, mcp->mb[0], mcp->mb[1]);
3453 } else { 3420 } else {
3454 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3421 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
3455 *sector_size = mcp->mb[1]; 3422 *sector_size = mcp->mb[1];
3456 } 3423 }
3457 3424
@@ -3468,7 +3435,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3468 if (!IS_QLA81XX(vha->hw)) 3435 if (!IS_QLA81XX(vha->hw))
3469 return QLA_FUNCTION_FAILED; 3436 return QLA_FUNCTION_FAILED;
3470 3437
3471 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3438 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
3472 3439
3473 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3440 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3474 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 3441 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3480,10 +3447,11 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3480 rval = qla2x00_mailbox_command(vha, mcp); 3447 rval = qla2x00_mailbox_command(vha, mcp);
3481 3448
3482 if (rval != QLA_SUCCESS) { 3449 if (rval != QLA_SUCCESS) {
3483 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 3450 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
3484 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3451 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3452 rval, mcp->mb[0], mcp->mb[1]);
3485 } else { 3453 } else {
3486 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3454 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
3487 } 3455 }
3488 3456
3489 return rval; 3457 return rval;
@@ -3499,7 +3467,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3499 if (!IS_QLA81XX(vha->hw)) 3467 if (!IS_QLA81XX(vha->hw))
3500 return QLA_FUNCTION_FAILED; 3468 return QLA_FUNCTION_FAILED;
3501 3469
3502 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3470 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
3503 3471
3504 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3472 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3505 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 3473 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3514,11 +3482,11 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3514 rval = qla2x00_mailbox_command(vha, mcp); 3482 rval = qla2x00_mailbox_command(vha, mcp);
3515 3483
3516 if (rval != QLA_SUCCESS) { 3484 if (rval != QLA_SUCCESS) {
3517 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 3485 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
3518 "mb[2]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], 3486 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3519 mcp->mb[1], mcp->mb[2])); 3487 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3520 } else { 3488 } else {
3521 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3489 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
3522 } 3490 }
3523 3491
3524 return rval; 3492 return rval;
@@ -3531,7 +3499,7 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3531 mbx_cmd_t mc; 3499 mbx_cmd_t mc;
3532 mbx_cmd_t *mcp = &mc; 3500 mbx_cmd_t *mcp = &mc;
3533 3501
3534 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3502 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
3535 3503
3536 mcp->mb[0] = MBC_RESTART_MPI_FW; 3504 mcp->mb[0] = MBC_RESTART_MPI_FW;
3537 mcp->out_mb = MBX_0; 3505 mcp->out_mb = MBX_0;
@@ -3541,10 +3509,11 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3541 rval = qla2x00_mailbox_command(vha, mcp); 3509 rval = qla2x00_mailbox_command(vha, mcp);
3542 3510
3543 if (rval != QLA_SUCCESS) { 3511 if (rval != QLA_SUCCESS) {
3544 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", 3512 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
3545 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3513 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3514 rval, mcp->mb[0], mcp->mb[1]);
3546 } else { 3515 } else {
3547 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3516 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
3548 } 3517 }
3549 3518
3550 return rval; 3519 return rval;
@@ -3559,11 +3528,11 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3559 mbx_cmd_t *mcp = &mc; 3528 mbx_cmd_t *mcp = &mc;
3560 struct qla_hw_data *ha = vha->hw; 3529 struct qla_hw_data *ha = vha->hw;
3561 3530
3531 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
3532
3562 if (!IS_FWI2_CAPABLE(ha)) 3533 if (!IS_FWI2_CAPABLE(ha))
3563 return QLA_FUNCTION_FAILED; 3534 return QLA_FUNCTION_FAILED;
3564 3535
3565 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3566
3567 if (len == 1) 3536 if (len == 1)
3568 opt |= BIT_0; 3537 opt |= BIT_0;
3569 3538
@@ -3586,10 +3555,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3586 *sfp = mcp->mb[1]; 3555 *sfp = mcp->mb[1];
3587 3556
3588 if (rval != QLA_SUCCESS) { 3557 if (rval != QLA_SUCCESS) {
3589 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3558 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3590 vha->host_no, rval, mcp->mb[0])); 3559 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3591 } else { 3560 } else {
3592 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3561 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
3593 } 3562 }
3594 3563
3595 return rval; 3564 return rval;
@@ -3604,11 +3573,11 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3604 mbx_cmd_t *mcp = &mc; 3573 mbx_cmd_t *mcp = &mc;
3605 struct qla_hw_data *ha = vha->hw; 3574 struct qla_hw_data *ha = vha->hw;
3606 3575
3576 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
3577
3607 if (!IS_FWI2_CAPABLE(ha)) 3578 if (!IS_FWI2_CAPABLE(ha))
3608 return QLA_FUNCTION_FAILED; 3579 return QLA_FUNCTION_FAILED;
3609 3580
3610 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3611
3612 if (len == 1) 3581 if (len == 1)
3613 opt |= BIT_0; 3582 opt |= BIT_0;
3614 3583
@@ -3631,10 +3600,10 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3631 rval = qla2x00_mailbox_command(vha, mcp); 3600 rval = qla2x00_mailbox_command(vha, mcp);
3632 3601
3633 if (rval != QLA_SUCCESS) { 3602 if (rval != QLA_SUCCESS) {
3634 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3603 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3635 vha->host_no, rval, mcp->mb[0])); 3604 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3636 } else { 3605 } else {
3637 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3606 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
3638 } 3607 }
3639 3608
3640 return rval; 3609 return rval;
@@ -3648,11 +3617,11 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3648 mbx_cmd_t mc; 3617 mbx_cmd_t mc;
3649 mbx_cmd_t *mcp = &mc; 3618 mbx_cmd_t *mcp = &mc;
3650 3619
3620 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
3621
3651 if (!IS_QLA8XXX_TYPE(vha->hw)) 3622 if (!IS_QLA8XXX_TYPE(vha->hw))
3652 return QLA_FUNCTION_FAILED; 3623 return QLA_FUNCTION_FAILED;
3653 3624
3654 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3655
3656 mcp->mb[0] = MBC_GET_XGMAC_STATS; 3625 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3657 mcp->mb[2] = MSW(stats_dma); 3626 mcp->mb[2] = MSW(stats_dma);
3658 mcp->mb[3] = LSW(stats_dma); 3627 mcp->mb[3] = LSW(stats_dma);
@@ -3666,11 +3635,12 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3666 rval = qla2x00_mailbox_command(vha, mcp); 3635 rval = qla2x00_mailbox_command(vha, mcp);
3667 3636
3668 if (rval != QLA_SUCCESS) { 3637 if (rval != QLA_SUCCESS) {
3669 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " 3638 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
3670 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, 3639 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3671 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 3640 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3672 } else { 3641 } else {
3673 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3642 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
3643
3674 3644
3675 *actual_size = mcp->mb[2] << 2; 3645 *actual_size = mcp->mb[2] << 2;
3676 } 3646 }
@@ -3686,11 +3656,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3686 mbx_cmd_t mc; 3656 mbx_cmd_t mc;
3687 mbx_cmd_t *mcp = &mc; 3657 mbx_cmd_t *mcp = &mc;
3688 3658
3659 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
3660
3689 if (!IS_QLA8XXX_TYPE(vha->hw)) 3661 if (!IS_QLA8XXX_TYPE(vha->hw))
3690 return QLA_FUNCTION_FAILED; 3662 return QLA_FUNCTION_FAILED;
3691 3663
3692 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3693
3694 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 3664 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3695 mcp->mb[1] = 0; 3665 mcp->mb[1] = 0;
3696 mcp->mb[2] = MSW(tlv_dma); 3666 mcp->mb[2] = MSW(tlv_dma);
@@ -3705,11 +3675,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3705 rval = qla2x00_mailbox_command(vha, mcp); 3675 rval = qla2x00_mailbox_command(vha, mcp);
3706 3676
3707 if (rval != QLA_SUCCESS) { 3677 if (rval != QLA_SUCCESS) {
3708 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " 3678 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
3709 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, 3679 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3710 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 3680 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3711 } else { 3681 } else {
3712 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3682 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
3713 } 3683 }
3714 3684
3715 return rval; 3685 return rval;
@@ -3722,11 +3692,11 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3722 mbx_cmd_t mc; 3692 mbx_cmd_t mc;
3723 mbx_cmd_t *mcp = &mc; 3693 mbx_cmd_t *mcp = &mc;
3724 3694
3695 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
3696
3725 if (!IS_FWI2_CAPABLE(vha->hw)) 3697 if (!IS_FWI2_CAPABLE(vha->hw))
3726 return QLA_FUNCTION_FAILED; 3698 return QLA_FUNCTION_FAILED;
3727 3699
3728 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3729
3730 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 3700 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3731 mcp->mb[1] = LSW(risc_addr); 3701 mcp->mb[1] = LSW(risc_addr);
3732 mcp->mb[8] = MSW(risc_addr); 3702 mcp->mb[8] = MSW(risc_addr);
@@ -3736,10 +3706,10 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3736 mcp->flags = 0; 3706 mcp->flags = 0;
3737 rval = qla2x00_mailbox_command(vha, mcp); 3707 rval = qla2x00_mailbox_command(vha, mcp);
3738 if (rval != QLA_SUCCESS) { 3708 if (rval != QLA_SUCCESS) {
3739 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3709 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3740 vha->host_no, rval, mcp->mb[0])); 3710 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3741 } else { 3711 } else {
3742 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3712 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
3743 *data = mcp->mb[3] << 16 | mcp->mb[2]; 3713 *data = mcp->mb[3] << 16 | mcp->mb[2];
3744 } 3714 }
3745 3715
@@ -3755,7 +3725,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3755 mbx_cmd_t *mcp = &mc; 3725 mbx_cmd_t *mcp = &mc;
3756 uint32_t iter_cnt = 0x1; 3726 uint32_t iter_cnt = 0x1;
3757 3727
3758 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); 3728 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
3759 3729
3760 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3730 memset(mcp->mb, 0 , sizeof(mcp->mb));
3761 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 3731 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3794,15 +3764,12 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3794 rval = qla2x00_mailbox_command(vha, mcp); 3764 rval = qla2x00_mailbox_command(vha, mcp);
3795 3765
3796 if (rval != QLA_SUCCESS) { 3766 if (rval != QLA_SUCCESS) {
3797 DEBUG2(printk(KERN_WARNING 3767 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
3798 "(%ld): failed=%x mb[0]=0x%x " 3768 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
3799 "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x " 3769 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3800 "mb[19]=0x%x.\n", 3770 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3801 vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3802 mcp->mb[3], mcp->mb[18], mcp->mb[19]));
3803 } else { 3771 } else {
3804 DEBUG2(printk(KERN_WARNING 3772 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
3805 "scsi(%ld): done.\n", vha->host_no));
3806 } 3773 }
3807 3774
3808 /* Copy mailbox information */ 3775 /* Copy mailbox information */
@@ -3819,7 +3786,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3819 mbx_cmd_t *mcp = &mc; 3786 mbx_cmd_t *mcp = &mc;
3820 struct qla_hw_data *ha = vha->hw; 3787 struct qla_hw_data *ha = vha->hw;
3821 3788
3822 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); 3789 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
3823 3790
3824 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3791 memset(mcp->mb, 0 , sizeof(mcp->mb));
3825 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 3792 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3858,12 +3825,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3858 rval = qla2x00_mailbox_command(vha, mcp); 3825 rval = qla2x00_mailbox_command(vha, mcp);
3859 3826
3860 if (rval != QLA_SUCCESS) { 3827 if (rval != QLA_SUCCESS) {
3861 DEBUG2(printk(KERN_WARNING 3828 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
3862 "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", 3829 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3863 vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3830 rval, mcp->mb[0], mcp->mb[1]);
3864 } else { 3831 } else {
3865 DEBUG2(printk(KERN_WARNING 3832 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
3866 "scsi(%ld): done.\n", vha->host_no));
3867 } 3833 }
3868 3834
3869 /* Copy mailbox information */ 3835 /* Copy mailbox information */
@@ -3872,14 +3838,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3872} 3838}
3873 3839
3874int 3840int
3875qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic) 3841qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3876{ 3842{
3877 int rval; 3843 int rval;
3878 mbx_cmd_t mc; 3844 mbx_cmd_t mc;
3879 mbx_cmd_t *mcp = &mc; 3845 mbx_cmd_t *mcp = &mc;
3880 3846
3881 DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__, 3847 ql_dbg(ql_dbg_mbx, vha, 0x10fd,
3882 ha->host_no, enable_diagnostic)); 3848 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3883 3849
3884 mcp->mb[0] = MBC_ISP84XX_RESET; 3850 mcp->mb[0] = MBC_ISP84XX_RESET;
3885 mcp->mb[1] = enable_diagnostic; 3851 mcp->mb[1] = enable_diagnostic;
@@ -3887,13 +3853,12 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
3887 mcp->in_mb = MBX_1|MBX_0; 3853 mcp->in_mb = MBX_1|MBX_0;
3888 mcp->tov = MBX_TOV_SECONDS; 3854 mcp->tov = MBX_TOV_SECONDS;
3889 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 3855 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3890 rval = qla2x00_mailbox_command(ha, mcp); 3856 rval = qla2x00_mailbox_command(vha, mcp);
3891 3857
3892 if (rval != QLA_SUCCESS) 3858 if (rval != QLA_SUCCESS)
3893 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no, 3859 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3894 rval));
3895 else 3860 else
3896 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3861 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
3897 3862
3898 return rval; 3863 return rval;
3899} 3864}
@@ -3905,11 +3870,11 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3905 mbx_cmd_t mc; 3870 mbx_cmd_t mc;
3906 mbx_cmd_t *mcp = &mc; 3871 mbx_cmd_t *mcp = &mc;
3907 3872
3873 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
3874
3908 if (!IS_FWI2_CAPABLE(vha->hw)) 3875 if (!IS_FWI2_CAPABLE(vha->hw))
3909 return QLA_FUNCTION_FAILED; 3876 return QLA_FUNCTION_FAILED;
3910 3877
3911 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3912
3913 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 3878 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3914 mcp->mb[1] = LSW(risc_addr); 3879 mcp->mb[1] = LSW(risc_addr);
3915 mcp->mb[2] = LSW(data); 3880 mcp->mb[2] = LSW(data);
@@ -3921,10 +3886,10 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3921 mcp->flags = 0; 3886 mcp->flags = 0;
3922 rval = qla2x00_mailbox_command(vha, mcp); 3887 rval = qla2x00_mailbox_command(vha, mcp);
3923 if (rval != QLA_SUCCESS) { 3888 if (rval != QLA_SUCCESS) {
3924 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3889 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3925 vha->host_no, rval, mcp->mb[0])); 3890 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3926 } else { 3891 } else {
3927 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3892 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
3928 } 3893 }
3929 3894
3930 return rval; 3895 return rval;
@@ -3941,8 +3906,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3941 3906
3942 rval = QLA_SUCCESS; 3907 rval = QLA_SUCCESS;
3943 3908
3944 DEBUG11(qla_printk(KERN_INFO, ha, 3909 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
3945 "%s(%ld): entered.\n", __func__, vha->host_no));
3946 3910
3947 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 3911 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
3948 3912
@@ -3982,11 +3946,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3982 rval = QLA_FUNCTION_FAILED; 3946 rval = QLA_FUNCTION_FAILED;
3983 3947
3984 if (rval != QLA_SUCCESS) { 3948 if (rval != QLA_SUCCESS) {
3985 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", 3949 ql_dbg(ql_dbg_mbx, vha, 0x1104,
3986 __func__, vha->host_no, rval, mb[0])); 3950 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
3987 } else { 3951 } else {
3988 DEBUG11(printk(KERN_INFO 3952 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
3989 "%s(%ld): done.\n", __func__, vha->host_no));
3990 } 3953 }
3991 3954
3992 return rval; 3955 return rval;
@@ -3999,12 +3962,11 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
3999 mbx_cmd_t *mcp = &mc; 3962 mbx_cmd_t *mcp = &mc;
4000 struct qla_hw_data *ha = vha->hw; 3963 struct qla_hw_data *ha = vha->hw;
4001 3964
3965 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
3966
4002 if (!IS_FWI2_CAPABLE(ha)) 3967 if (!IS_FWI2_CAPABLE(ha))
4003 return QLA_FUNCTION_FAILED; 3968 return QLA_FUNCTION_FAILED;
4004 3969
4005 DEBUG11(qla_printk(KERN_INFO, ha,
4006 "%s(%ld): entered.\n", __func__, vha->host_no));
4007
4008 mcp->mb[0] = MBC_DATA_RATE; 3970 mcp->mb[0] = MBC_DATA_RATE;
4009 mcp->mb[1] = 0; 3971 mcp->mb[1] = 0;
4010 mcp->out_mb = MBX_1|MBX_0; 3972 mcp->out_mb = MBX_1|MBX_0;
@@ -4013,11 +3975,10 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4013 mcp->flags = 0; 3975 mcp->flags = 0;
4014 rval = qla2x00_mailbox_command(vha, mcp); 3976 rval = qla2x00_mailbox_command(vha, mcp);
4015 if (rval != QLA_SUCCESS) { 3977 if (rval != QLA_SUCCESS) {
4016 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", 3978 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4017 __func__, vha->host_no, rval, mcp->mb[0])); 3979 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4018 } else { 3980 } else {
4019 DEBUG11(printk(KERN_INFO 3981 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
4020 "%s(%ld): done.\n", __func__, vha->host_no));
4021 if (mcp->mb[1] != 0x7) 3982 if (mcp->mb[1] != 0x7)
4022 ha->link_data_rate = mcp->mb[1]; 3983 ha->link_data_rate = mcp->mb[1];
4023 } 3984 }
@@ -4033,8 +3994,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4033 mbx_cmd_t *mcp = &mc; 3994 mbx_cmd_t *mcp = &mc;
4034 struct qla_hw_data *ha = vha->hw; 3995 struct qla_hw_data *ha = vha->hw;
4035 3996
4036 DEBUG11(printk(KERN_INFO 3997 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
4037 "%s(%ld): entered.\n", __func__, vha->host_no));
4038 3998
4039 if (!IS_QLA81XX(ha)) 3999 if (!IS_QLA81XX(ha))
4040 return QLA_FUNCTION_FAILED; 4000 return QLA_FUNCTION_FAILED;
@@ -4047,15 +4007,13 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4047 rval = qla2x00_mailbox_command(vha, mcp); 4007 rval = qla2x00_mailbox_command(vha, mcp);
4048 4008
4049 if (rval != QLA_SUCCESS) { 4009 if (rval != QLA_SUCCESS) {
4050 DEBUG2_3_11(printk(KERN_WARNING 4010 ql_dbg(ql_dbg_mbx, vha, 0x110a,
4051 "%s(%ld): failed=%x (%x).\n", __func__, 4011 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4052 vha->host_no, rval, mcp->mb[0]));
4053 } else { 4012 } else {
4054 /* Copy all bits to preserve original value */ 4013 /* Copy all bits to preserve original value */
4055 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 4014 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4056 4015
4057 DEBUG11(printk(KERN_INFO 4016 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
4058 "%s(%ld): done.\n", __func__, vha->host_no));
4059 } 4017 }
4060 return rval; 4018 return rval;
4061} 4019}
@@ -4067,8 +4025,7 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4067 mbx_cmd_t mc; 4025 mbx_cmd_t mc;
4068 mbx_cmd_t *mcp = &mc; 4026 mbx_cmd_t *mcp = &mc;
4069 4027
4070 DEBUG11(printk(KERN_INFO 4028 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
4071 "%s(%ld): entered.\n", __func__, vha->host_no));
4072 4029
4073 mcp->mb[0] = MBC_SET_PORT_CONFIG; 4030 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4074 /* Copy all bits to preserve original setting */ 4031 /* Copy all bits to preserve original setting */
@@ -4080,12 +4037,10 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4080 rval = qla2x00_mailbox_command(vha, mcp); 4037 rval = qla2x00_mailbox_command(vha, mcp);
4081 4038
4082 if (rval != QLA_SUCCESS) { 4039 if (rval != QLA_SUCCESS) {
4083 DEBUG2_3_11(printk(KERN_WARNING 4040 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4084 "%s(%ld): failed=%x (%x).\n", __func__, 4041 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4085 vha->host_no, rval, mcp->mb[0]));
4086 } else 4042 } else
4087 DEBUG11(printk(KERN_INFO 4043 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
4088 "%s(%ld): done.\n", __func__, vha->host_no));
4089 4044
4090 return rval; 4045 return rval;
4091} 4046}
@@ -4100,12 +4055,11 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4100 mbx_cmd_t *mcp = &mc; 4055 mbx_cmd_t *mcp = &mc;
4101 struct qla_hw_data *ha = vha->hw; 4056 struct qla_hw_data *ha = vha->hw;
4102 4057
4058 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
4059
4103 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 4060 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4104 return QLA_FUNCTION_FAILED; 4061 return QLA_FUNCTION_FAILED;
4105 4062
4106 DEBUG11(printk(KERN_INFO
4107 "%s(%ld): entered.\n", __func__, vha->host_no));
4108
4109 mcp->mb[0] = MBC_PORT_PARAMS; 4063 mcp->mb[0] = MBC_PORT_PARAMS;
4110 mcp->mb[1] = loop_id; 4064 mcp->mb[1] = loop_id;
4111 if (ha->flags.fcp_prio_enabled) 4065 if (ha->flags.fcp_prio_enabled)
@@ -4127,12 +4081,9 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4127 } 4081 }
4128 4082
4129 if (rval != QLA_SUCCESS) { 4083 if (rval != QLA_SUCCESS) {
4130 DEBUG2_3_11(printk(KERN_WARNING 4084 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4131 "%s(%ld): failed=%x.\n", __func__,
4132 vha->host_no, rval));
4133 } else { 4085 } else {
4134 DEBUG11(printk(KERN_INFO 4086 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
4135 "%s(%ld): done.\n", __func__, vha->host_no));
4136 } 4087 }
4137 4088
4138 return rval; 4089 return rval;
@@ -4145,13 +4096,12 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4145 uint8_t byte; 4096 uint8_t byte;
4146 struct qla_hw_data *ha = vha->hw; 4097 struct qla_hw_data *ha = vha->hw;
4147 4098
4148 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no)); 4099 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
4149 4100
4150 /* Integer part */ 4101 /* Integer part */
4151 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); 4102 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4152 if (rval != QLA_SUCCESS) { 4103 if (rval != QLA_SUCCESS) {
4153 DEBUG2_3_11(printk(KERN_WARNING 4104 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4154 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4155 ha->flags.thermal_supported = 0; 4105 ha->flags.thermal_supported = 0;
4156 goto fail; 4106 goto fail;
4157 } 4107 }
@@ -4160,14 +4110,13 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4160 /* Fraction part */ 4110 /* Fraction part */
4161 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0); 4111 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4162 if (rval != QLA_SUCCESS) { 4112 if (rval != QLA_SUCCESS) {
4163 DEBUG2_3_11(printk(KERN_WARNING 4113 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4164 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4165 ha->flags.thermal_supported = 0; 4114 ha->flags.thermal_supported = 0;
4166 goto fail; 4115 goto fail;
4167 } 4116 }
4168 *frac = (byte >> 6) * 25; 4117 *frac = (byte >> 6) * 25;
4169 4118
4170 DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no)); 4119 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
4171fail: 4120fail:
4172 return rval; 4121 return rval;
4173} 4122}
@@ -4180,12 +4129,11 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4180 mbx_cmd_t mc; 4129 mbx_cmd_t mc;
4181 mbx_cmd_t *mcp = &mc; 4130 mbx_cmd_t *mcp = &mc;
4182 4131
4132 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
4133
4183 if (!IS_FWI2_CAPABLE(ha)) 4134 if (!IS_FWI2_CAPABLE(ha))
4184 return QLA_FUNCTION_FAILED; 4135 return QLA_FUNCTION_FAILED;
4185 4136
4186 DEBUG11(qla_printk(KERN_INFO, ha,
4187 "%s(%ld): entered.\n", __func__, vha->host_no));
4188
4189 memset(mcp, 0, sizeof(mbx_cmd_t)); 4137 memset(mcp, 0, sizeof(mbx_cmd_t));
4190 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 4138 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4191 mcp->mb[1] = 1; 4139 mcp->mb[1] = 1;
@@ -4197,12 +4145,10 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4197 4145
4198 rval = qla2x00_mailbox_command(vha, mcp); 4146 rval = qla2x00_mailbox_command(vha, mcp);
4199 if (rval != QLA_SUCCESS) { 4147 if (rval != QLA_SUCCESS) {
4200 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 4148 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4201 "%s(%ld): failed=%x mb[0]=%x.\n", __func__, 4149 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4202 vha->host_no, rval, mcp->mb[0]));
4203 } else { 4150 } else {
4204 DEBUG11(qla_printk(KERN_INFO, ha, 4151 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
4205 "%s(%ld): done.\n", __func__, vha->host_no));
4206 } 4152 }
4207 4153
4208 return rval; 4154 return rval;
@@ -4216,12 +4162,11 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4216 mbx_cmd_t mc; 4162 mbx_cmd_t mc;
4217 mbx_cmd_t *mcp = &mc; 4163 mbx_cmd_t *mcp = &mc;
4218 4164
4165 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
4166
4219 if (!IS_QLA82XX(ha)) 4167 if (!IS_QLA82XX(ha))
4220 return QLA_FUNCTION_FAILED; 4168 return QLA_FUNCTION_FAILED;
4221 4169
4222 DEBUG11(qla_printk(KERN_INFO, ha,
4223 "%s(%ld): entered.\n", __func__, vha->host_no));
4224
4225 memset(mcp, 0, sizeof(mbx_cmd_t)); 4170 memset(mcp, 0, sizeof(mbx_cmd_t));
4226 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 4171 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4227 mcp->mb[1] = 0; 4172 mcp->mb[1] = 0;
@@ -4233,12 +4178,10 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4233 4178
4234 rval = qla2x00_mailbox_command(vha, mcp); 4179 rval = qla2x00_mailbox_command(vha, mcp);
4235 if (rval != QLA_SUCCESS) { 4180 if (rval != QLA_SUCCESS) {
4236 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 4181 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4237 "%s(%ld): failed=%x mb[0]=%x.\n", __func__, 4182 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4238 vha->host_no, rval, mcp->mb[0]));
4239 } else { 4183 } else {
4240 DEBUG11(qla_printk(KERN_INFO, ha, 4184 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
4241 "%s(%ld): done.\n", __func__, vha->host_no));
4242 } 4185 }
4243 4186
4244 return rval; 4187 return rval;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 5e343919acad..c706ed370000 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -36,8 +36,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
36 mutex_lock(&ha->vport_lock); 36 mutex_lock(&ha->vport_lock);
37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); 37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
38 if (vp_id > ha->max_npiv_vports) { 38 if (vp_id > ha->max_npiv_vports) {
39 DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n", 39 ql_dbg(ql_dbg_vport, vha, 0xa000,
40 vp_id, ha->max_npiv_vports)); 40 "vp_id %d is bigger than max-supported %d.\n",
41 vp_id, ha->max_npiv_vports);
41 mutex_unlock(&ha->vport_lock); 42 mutex_unlock(&ha->vport_lock);
42 return vp_id; 43 return vp_id;
43 } 44 }
@@ -131,9 +132,9 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
131 fc_port_t *fcport; 132 fc_port_t *fcport;
132 133
133 list_for_each_entry(fcport, &vha->vp_fcports, list) { 134 list_for_each_entry(fcport, &vha->vp_fcports, list) {
134 DEBUG15(printk("scsi(%ld): Marking port dead, " 135 ql_dbg(ql_dbg_vport, vha, 0xa001,
135 "loop_id=0x%04x :%x\n", 136 "Marking port dead, loop_id=0x%04x : %x.\n",
136 vha->host_no, fcport->loop_id, fcport->vp_idx)); 137 fcport->loop_id, fcport->vp_idx);
137 138
138 qla2x00_mark_device_lost(vha, fcport, 0, 0); 139 qla2x00_mark_device_lost(vha, fcport, 0, 0);
139 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 140 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -187,13 +188,13 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
187 goto enable_failed; 188 goto enable_failed;
188 } 189 }
189 190
190 DEBUG15(qla_printk(KERN_INFO, ha, 191 ql_dbg(ql_dbg_taskm, vha, 0x801a,
191 "Virtual port with id: %d - Enabled\n", vha->vp_idx)); 192 "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
192 return 0; 193 return 0;
193 194
194enable_failed: 195enable_failed:
195 DEBUG15(qla_printk(KERN_INFO, ha, 196 ql_dbg(ql_dbg_taskm, vha, 0x801b,
196 "Virtual port with id: %d - Disabled\n", vha->vp_idx)); 197 "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
197 return 1; 198 return 1;
198} 199}
199 200
@@ -205,12 +206,12 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
205 206
206 fc_vport = vha->fc_vport; 207 fc_vport = vha->fc_vport;
207 208
208 DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n", 209 ql_dbg(ql_dbg_vport, vha, 0xa002,
209 vha->host_no, __func__)); 210 "%s: change request #3.\n", __func__);
210 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); 211 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
211 if (ret != QLA_SUCCESS) { 212 if (ret != QLA_SUCCESS) {
212 DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable " 213 ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
213 "receiving of RSCN requests: 0x%x\n", ret)); 214 "receiving of RSCN requests: 0x%x.\n", ret);
214 return; 215 return;
215 } else { 216 } else {
216 /* Corresponds to SCR enabled */ 217 /* Corresponds to SCR enabled */
@@ -248,9 +249,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
248 case MBA_CHG_IN_CONNECTION: 249 case MBA_CHG_IN_CONNECTION:
249 case MBA_PORT_UPDATE: 250 case MBA_PORT_UPDATE:
250 case MBA_RSCN_UPDATE: 251 case MBA_RSCN_UPDATE:
251 DEBUG15(printk("scsi(%ld)%s: Async_event for" 252 ql_dbg(ql_dbg_async, vha, 0x5024,
252 " VP[%d], mb = 0x%x, vha=%p\n", 253 "Async_event for VP[%d], mb=0x%x vha=%p.\n",
253 vha->host_no, __func__, i, *mb, vha)); 254 i, *mb, vha);
254 qla2x00_async_event(vha, rsp, mb); 255 qla2x00_async_event(vha, rsp, mb);
255 break; 256 break;
256 } 257 }
@@ -286,37 +287,49 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
286 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 287 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
287 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 288 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
288 289
289 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n", 290 ql_dbg(ql_dbg_taskm, vha, 0x801d,
290 vha->host_no, vha->vp_idx)); 291 "Scheduling enable of Vport %d.\n", vha->vp_idx);
291 return qla24xx_enable_vp(vha); 292 return qla24xx_enable_vp(vha);
292} 293}
293 294
294static int 295static int
295qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 296qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
296{ 297{
298 ql_dbg(ql_dbg_dpc, vha, 0x4012,
299 "Entering %s.\n", __func__);
300 ql_dbg(ql_dbg_dpc, vha, 0x4013,
301 "vp_flags: 0x%lx.\n", vha->vp_flags);
302
297 qla2x00_do_work(vha); 303 qla2x00_do_work(vha);
298 304
299 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 305 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
300 /* VP acquired. complete port configuration */ 306 /* VP acquired. complete port configuration */
307 ql_dbg(ql_dbg_dpc, vha, 0x4014,
308 "Configure VP scheduled.\n");
301 qla24xx_configure_vp(vha); 309 qla24xx_configure_vp(vha);
310 ql_dbg(ql_dbg_dpc, vha, 0x4015,
311 "Configure VP end.\n");
302 return 0; 312 return 0;
303 } 313 }
304 314
305 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { 315 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
316 ql_dbg(ql_dbg_dpc, vha, 0x4016,
317 "FCPort update scheduled.\n");
306 qla2x00_update_fcports(vha); 318 qla2x00_update_fcports(vha);
307 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); 319 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
320 ql_dbg(ql_dbg_dpc, vha, 0x4017,
321 "FCPort update end.\n");
308 } 322 }
309 323
310 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && 324 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
311 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && 325 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
312 atomic_read(&vha->loop_state) != LOOP_DOWN) { 326 atomic_read(&vha->loop_state) != LOOP_DOWN) {
313 327
314 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 328 ql_dbg(ql_dbg_dpc, vha, 0x4018,
315 vha->host_no)); 329 "Relogin needed scheduled.\n");
316 qla2x00_relogin(vha); 330 qla2x00_relogin(vha);
317 331 ql_dbg(ql_dbg_dpc, vha, 0x4019,
318 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 332 "Relogin needed end.\n");
319 vha->host_no));
320 } 333 }
321 334
322 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && 335 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -326,11 +339,17 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
326 339
327 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 340 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
328 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 341 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
342 ql_dbg(ql_dbg_dpc, vha, 0x401a,
343 "Loop resync scheduled.\n");
329 qla2x00_loop_resync(vha); 344 qla2x00_loop_resync(vha);
330 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 345 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
346 ql_dbg(ql_dbg_dpc, vha, 0x401b,
347 "Loop resync end.\n");
331 } 348 }
332 } 349 }
333 350
351 ql_dbg(ql_dbg_dpc, vha, 0x401c,
352 "Exiting %s.\n", __func__);
334 return 0; 353 return 0;
335} 354}
336 355
@@ -396,9 +415,10 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
396 415
397 /* Check up max-npiv-supports */ 416 /* Check up max-npiv-supports */
398 if (ha->num_vhosts > ha->max_npiv_vports) { 417 if (ha->num_vhosts > ha->max_npiv_vports) {
399 DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than " 418 ql_dbg(ql_dbg_vport, vha, 0xa004,
400 "max_npv_vports %ud.\n", base_vha->host_no, 419 "num_vhosts %ud is bigger "
401 ha->num_vhosts, ha->max_npiv_vports)); 420 "than max_npiv_vports %ud.\n",
421 ha->num_vhosts, ha->max_npiv_vports);
402 return VPCERR_UNSUPPORTED; 422 return VPCERR_UNSUPPORTED;
403 } 423 }
404 return 0; 424 return 0;
@@ -415,7 +435,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
415 435
416 vha = qla2x00_create_host(sht, ha); 436 vha = qla2x00_create_host(sht, ha);
417 if (!vha) { 437 if (!vha) {
418 DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n")); 438 ql_log(ql_log_warn, vha, 0xa005,
439 "scsi_host_alloc() failed for vport.\n");
419 return(NULL); 440 return(NULL);
420 } 441 }
421 442
@@ -429,8 +450,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
429 vha->device_flags = 0; 450 vha->device_flags = 0;
430 vha->vp_idx = qla24xx_allocate_vp_id(vha); 451 vha->vp_idx = qla24xx_allocate_vp_id(vha);
431 if (vha->vp_idx > ha->max_npiv_vports) { 452 if (vha->vp_idx > ha->max_npiv_vports) {
432 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", 453 ql_dbg(ql_dbg_vport, vha, 0xa006,
433 vha->host_no)); 454 "Couldn't allocate vp_id.\n");
434 goto create_vhost_failed; 455 goto create_vhost_failed;
435 } 456 }
436 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 457 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
@@ -461,8 +482,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
461 host->max_id = MAX_TARGETS_2200; 482 host->max_id = MAX_TARGETS_2200;
462 host->transportt = qla2xxx_transport_vport_template; 483 host->transportt = qla2xxx_transport_vport_template;
463 484
464 DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n", 485 ql_dbg(ql_dbg_vport, vha, 0xa007,
465 vha->host_no, vha)); 486 "Detect vport hba %ld at address = %p.\n",
487 vha->host_no, vha);
466 488
467 vha->flags.init_done = 1; 489 vha->flags.init_done = 1;
468 490
@@ -567,9 +589,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
567 if (req) { 589 if (req) {
568 ret = qla25xx_delete_req_que(vha, req); 590 ret = qla25xx_delete_req_que(vha, req);
569 if (ret != QLA_SUCCESS) { 591 if (ret != QLA_SUCCESS) {
570 qla_printk(KERN_WARNING, ha, 592 ql_log(ql_log_warn, vha, 0x00ea,
571 "Couldn't delete req que %d\n", 593 "Couldn't delete req que %d.\n",
572 req->id); 594 req->id);
573 return ret; 595 return ret;
574 } 596 }
575 } 597 }
@@ -581,9 +603,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
581 if (rsp) { 603 if (rsp) {
582 ret = qla25xx_delete_rsp_que(vha, rsp); 604 ret = qla25xx_delete_rsp_que(vha, rsp);
583 if (ret != QLA_SUCCESS) { 605 if (ret != QLA_SUCCESS) {
584 qla_printk(KERN_WARNING, ha, 606 ql_log(ql_log_warn, vha, 0x00eb,
585 "Couldn't delete rsp que %d\n", 607 "Couldn't delete rsp que %d.\n",
586 rsp->id); 608 rsp->id);
587 return ret; 609 return ret;
588 } 610 }
589 } 611 }
@@ -604,8 +626,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 626
605 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 627 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
606 if (req == NULL) { 628 if (req == NULL) {
607 qla_printk(KERN_WARNING, ha, "could not allocate memory" 629 ql_log(ql_log_fatal, base_vha, 0x00d9,
608 "for request que\n"); 630 "Failed to allocate memory for request queue.\n");
609 goto failed; 631 goto failed;
610 } 632 }
611 633
@@ -614,8 +636,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
614 (req->length + 1) * sizeof(request_t), 636 (req->length + 1) * sizeof(request_t),
615 &req->dma, GFP_KERNEL); 637 &req->dma, GFP_KERNEL);
616 if (req->ring == NULL) { 638 if (req->ring == NULL) {
617 qla_printk(KERN_WARNING, ha, 639 ql_log(ql_log_fatal, base_vha, 0x00da,
618 "Memory Allocation failed - request_ring\n"); 640 "Failed to allocte memory for request_ring.\n");
619 goto que_failed; 641 goto que_failed;
620 } 642 }
621 643
@@ -623,8 +645,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
623 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); 645 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
624 if (que_id >= ha->max_req_queues) { 646 if (que_id >= ha->max_req_queues) {
625 mutex_unlock(&ha->vport_lock); 647 mutex_unlock(&ha->vport_lock);
626 qla_printk(KERN_INFO, ha, "No resources to create " 648 ql_log(ql_log_warn, base_vha, 0x00db,
627 "additional request queue\n"); 649 "No resources to create additional request queue.\n");
628 goto que_failed; 650 goto que_failed;
629 } 651 }
630 set_bit(que_id, ha->req_qid_map); 652 set_bit(que_id, ha->req_qid_map);
@@ -633,6 +655,12 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
633 req->vp_idx = vp_idx; 655 req->vp_idx = vp_idx;
634 req->qos = qos; 656 req->qos = qos;
635 657
658 ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
659 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
660 que_id, req->rid, req->vp_idx, req->qos);
661 ql_dbg(ql_dbg_init, base_vha, 0x00dc,
662 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
663 que_id, req->rid, req->vp_idx, req->qos);
636 if (rsp_que < 0) 664 if (rsp_que < 0)
637 req->rsp = NULL; 665 req->rsp = NULL;
638 else 666 else
@@ -645,6 +673,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
645 options |= BIT_5; 673 options |= BIT_5;
646 req->options = options; 674 req->options = options;
647 675
676 ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
677 "options=0x%x.\n", req->options);
678 ql_dbg(ql_dbg_init, base_vha, 0x00dd,
679 "options=0x%x.\n", req->options);
648 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 680 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
649 req->outstanding_cmds[cnt] = NULL; 681 req->outstanding_cmds[cnt] = NULL;
650 req->current_outstanding_cmd = 1; 682 req->current_outstanding_cmd = 1;
@@ -656,10 +688,21 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
656 reg = ISP_QUE_REG(ha, que_id); 688 reg = ISP_QUE_REG(ha, que_id);
657 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 689 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
658 mutex_unlock(&ha->vport_lock); 690 mutex_unlock(&ha->vport_lock);
691 ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
692 "ring_ptr=%p ring_index=%d, "
693 "cnt=%d id=%d max_q_depth=%d.\n",
694 req->ring_ptr, req->ring_index,
695 req->cnt, req->id, req->max_q_depth);
696 ql_dbg(ql_dbg_init, base_vha, 0x00de,
697 "ring_ptr=%p ring_index=%d, "
698 "cnt=%d id=%d max_q_depth=%d.\n",
699 req->ring_ptr, req->ring_index, req->cnt,
700 req->id, req->max_q_depth);
659 701
660 ret = qla25xx_init_req_que(base_vha, req); 702 ret = qla25xx_init_req_que(base_vha, req);
661 if (ret != QLA_SUCCESS) { 703 if (ret != QLA_SUCCESS) {
662 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); 704 ql_log(ql_log_fatal, base_vha, 0x00df,
705 "%s failed.\n", __func__);
663 mutex_lock(&ha->vport_lock); 706 mutex_lock(&ha->vport_lock);
664 clear_bit(que_id, ha->req_qid_map); 707 clear_bit(que_id, ha->req_qid_map);
665 mutex_unlock(&ha->vport_lock); 708 mutex_unlock(&ha->vport_lock);
@@ -700,8 +743,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
700 743
701 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 744 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
702 if (rsp == NULL) { 745 if (rsp == NULL) {
703 qla_printk(KERN_WARNING, ha, "could not allocate memory for" 746 ql_log(ql_log_warn, base_vha, 0x0066,
704 " response que\n"); 747 "Failed to allocate memory for response queue.\n");
705 goto failed; 748 goto failed;
706 } 749 }
707 750
@@ -710,8 +753,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
710 (rsp->length + 1) * sizeof(response_t), 753 (rsp->length + 1) * sizeof(response_t),
711 &rsp->dma, GFP_KERNEL); 754 &rsp->dma, GFP_KERNEL);
712 if (rsp->ring == NULL) { 755 if (rsp->ring == NULL) {
713 qla_printk(KERN_WARNING, ha, 756 ql_log(ql_log_warn, base_vha, 0x00e1,
714 "Memory Allocation failed - response_ring\n"); 757 "Failed to allocate memory for response ring.\n");
715 goto que_failed; 758 goto que_failed;
716 } 759 }
717 760
@@ -719,8 +762,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
719 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); 762 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
720 if (que_id >= ha->max_rsp_queues) { 763 if (que_id >= ha->max_rsp_queues) {
721 mutex_unlock(&ha->vport_lock); 764 mutex_unlock(&ha->vport_lock);
722 qla_printk(KERN_INFO, ha, "No resources to create " 765 ql_log(ql_log_warn, base_vha, 0x00e2,
723 "additional response queue\n"); 766 "No resources to create additional request queue.\n");
724 goto que_failed; 767 goto que_failed;
725 } 768 }
726 set_bit(que_id, ha->rsp_qid_map); 769 set_bit(que_id, ha->rsp_qid_map);
@@ -728,12 +771,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 if (ha->flags.msix_enabled) 771 if (ha->flags.msix_enabled)
729 rsp->msix = &ha->msix_entries[que_id + 1]; 772 rsp->msix = &ha->msix_entries[que_id + 1];
730 else 773 else
731 qla_printk(KERN_WARNING, ha, "msix not enabled\n"); 774 ql_log(ql_log_warn, base_vha, 0x00e3,
775 "MSIX not enalbled.\n");
732 776
733 ha->rsp_q_map[que_id] = rsp; 777 ha->rsp_q_map[que_id] = rsp;
734 rsp->rid = rid; 778 rsp->rid = rid;
735 rsp->vp_idx = vp_idx; 779 rsp->vp_idx = vp_idx;
736 rsp->hw = ha; 780 rsp->hw = ha;
781 ql_dbg(ql_dbg_init, base_vha, 0x00e4,
782 "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
783 que_id, rsp->rid, rsp->vp_idx, rsp->hw);
737 /* Use alternate PCI bus number */ 784 /* Use alternate PCI bus number */
738 if (MSB(rsp->rid)) 785 if (MSB(rsp->rid))
739 options |= BIT_4; 786 options |= BIT_4;
@@ -750,6 +797,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
750 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 797 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
751 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; 798 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
752 mutex_unlock(&ha->vport_lock); 799 mutex_unlock(&ha->vport_lock);
800 ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
801 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
802 rsp->options, rsp->id, rsp->rsp_q_in,
803 rsp->rsp_q_out);
804 ql_dbg(ql_dbg_init, base_vha, 0x00e5,
805 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
806 rsp->options, rsp->id, rsp->rsp_q_in,
807 rsp->rsp_q_out);
753 808
754 ret = qla25xx_request_irq(rsp); 809 ret = qla25xx_request_irq(rsp);
755 if (ret) 810 if (ret)
@@ -757,7 +812,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
757 812
758 ret = qla25xx_init_rsp_que(base_vha, rsp); 813 ret = qla25xx_init_rsp_que(base_vha, rsp);
759 if (ret != QLA_SUCCESS) { 814 if (ret != QLA_SUCCESS) {
760 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); 815 ql_log(ql_log_fatal, base_vha, 0x00e7,
816 "%s failed.\n", __func__);
761 mutex_lock(&ha->vport_lock); 817 mutex_lock(&ha->vport_lock);
762 clear_bit(que_id, ha->rsp_qid_map); 818 clear_bit(que_id, ha->rsp_qid_map);
763 mutex_unlock(&ha->vport_lock); 819 mutex_unlock(&ha->vport_lock);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index e1138bcc834c..5cbf33a50b14 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -348,6 +348,7 @@ static void
348qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) 348qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
349{ 349{
350 u32 win_read; 350 u32 win_read;
351 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
351 352
352 ha->crb_win = CRB_HI(*off); 353 ha->crb_win = CRB_HI(*off);
353 writel(ha->crb_win, 354 writel(ha->crb_win,
@@ -358,9 +359,10 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
358 */ 359 */
359 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase)); 360 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
360 if (win_read != ha->crb_win) { 361 if (win_read != ha->crb_win) {
361 DEBUG2(qla_printk(KERN_INFO, ha, 362 ql_dbg(ql_dbg_p3p, vha, 0xb000,
362 "%s: Written crbwin (0x%x) != Read crbwin (0x%x), " 363 "%s: Written crbwin (0x%x) "
363 "off=0x%lx\n", __func__, ha->crb_win, win_read, *off)); 364 "!= Read crbwin (0x%x), off=0x%lx.\n",
365 ha->crb_win, win_read, *off);
364 } 366 }
365 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; 367 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
366} 368}
@@ -368,6 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
368static inline unsigned long 370static inline unsigned long
369qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) 371qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
370{ 372{
373 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
371 /* See if we are currently pointing to the region we want to use next */ 374 /* See if we are currently pointing to the region we want to use next */
372 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) { 375 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
373 /* No need to change window. PCIX and PCIEregs are in both 376 /* No need to change window. PCIX and PCIEregs are in both
@@ -398,9 +401,10 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
398 return off; 401 return off;
399 } 402 }
400 /* strange address given */ 403 /* strange address given */
401 qla_printk(KERN_WARNING, ha, 404 ql_dbg(ql_dbg_p3p, vha, 0xb001,
402 "%s: Warning: unm_nic_pci_set_crbwindow called with" 405 "%x: Warning: unm_nic_pci_set_crbwindow "
403 " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off); 406 "called with an unknown address(%llx).\n",
407 QLA2XXX_DRIVER_NAME, off);
404 return off; 408 return off;
405} 409}
406 410
@@ -563,6 +567,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
563{ 567{
564 int window; 568 int window;
565 u32 win_read; 569 u32 win_read;
570 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
566 571
567 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 572 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
568 QLA82XX_ADDR_DDR_NET_MAX)) { 573 QLA82XX_ADDR_DDR_NET_MAX)) {
@@ -574,8 +579,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
574 win_read = qla82xx_rd_32(ha, 579 win_read = qla82xx_rd_32(ha,
575 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); 580 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
576 if ((win_read << 17) != window) { 581 if ((win_read << 17) != window) {
577 qla_printk(KERN_WARNING, ha, 582 ql_dbg(ql_dbg_p3p, vha, 0xb003,
578 "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n", 583 "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
579 __func__, window, win_read); 584 __func__, window, win_read);
580 } 585 }
581 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; 586 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
@@ -583,7 +588,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
583 QLA82XX_ADDR_OCM0_MAX)) { 588 QLA82XX_ADDR_OCM0_MAX)) {
584 unsigned int temp1; 589 unsigned int temp1;
585 if ((addr & 0x00ff800) == 0xff800) { 590 if ((addr & 0x00ff800) == 0xff800) {
586 qla_printk(KERN_WARNING, ha, 591 ql_log(ql_log_warn, vha, 0xb004,
587 "%s: QM access not handled.\n", __func__); 592 "%s: QM access not handled.\n", __func__);
588 addr = -1UL; 593 addr = -1UL;
589 } 594 }
@@ -596,8 +601,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
596 temp1 = ((window & 0x1FF) << 7) | 601 temp1 = ((window & 0x1FF) << 7) |
597 ((window & 0x0FFFE0000) >> 17); 602 ((window & 0x0FFFE0000) >> 17);
598 if (win_read != temp1) { 603 if (win_read != temp1) {
599 qla_printk(KERN_WARNING, ha, 604 ql_log(ql_log_warn, vha, 0xb005,
600 "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n", 605 "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
601 __func__, temp1, win_read); 606 __func__, temp1, win_read);
602 } 607 }
603 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; 608 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
@@ -612,8 +617,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
612 win_read = qla82xx_rd_32(ha, 617 win_read = qla82xx_rd_32(ha,
613 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); 618 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
614 if (win_read != window) { 619 if (win_read != window) {
615 qla_printk(KERN_WARNING, ha, 620 ql_log(ql_log_warn, vha, 0xb006,
616 "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n", 621 "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
617 __func__, window, win_read); 622 __func__, window, win_read);
618 } 623 }
619 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; 624 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
@@ -624,9 +629,9 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
624 */ 629 */
625 if ((qla82xx_pci_set_window_warning_count++ < 8) || 630 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
626 (qla82xx_pci_set_window_warning_count%64 == 0)) { 631 (qla82xx_pci_set_window_warning_count%64 == 0)) {
627 qla_printk(KERN_WARNING, ha, 632 ql_log(ql_log_warn, vha, 0xb007,
628 "%s: Warning:%s Unknown address range!\n", __func__, 633 "%s: Warning:%s Unknown address range!.\n",
629 QLA2XXX_DRIVER_NAME); 634 __func__, QLA2XXX_DRIVER_NAME);
630 } 635 }
631 addr = -1UL; 636 addr = -1UL;
632 } 637 }
@@ -671,6 +676,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
671 uint8_t *mem_ptr = NULL; 676 uint8_t *mem_ptr = NULL;
672 unsigned long mem_base; 677 unsigned long mem_base;
673 unsigned long mem_page; 678 unsigned long mem_page;
679 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
674 680
675 write_lock_irqsave(&ha->hw_lock, flags); 681 write_lock_irqsave(&ha->hw_lock, flags);
676 682
@@ -682,9 +688,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
682 if ((start == -1UL) || 688 if ((start == -1UL) ||
683 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 689 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
684 write_unlock_irqrestore(&ha->hw_lock, flags); 690 write_unlock_irqrestore(&ha->hw_lock, flags);
685 qla_printk(KERN_ERR, ha, 691 ql_log(ql_log_fatal, vha, 0xb008,
686 "%s out of bound pci memory access. " 692 "%s out of bound pci memory "
687 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 693 "access, offset is 0x%llx.\n",
694 QLA2XXX_DRIVER_NAME, off);
688 return -1; 695 return -1;
689 } 696 }
690 697
@@ -741,6 +748,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
741 uint8_t *mem_ptr = NULL; 748 uint8_t *mem_ptr = NULL;
742 unsigned long mem_base; 749 unsigned long mem_base;
743 unsigned long mem_page; 750 unsigned long mem_page;
751 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
744 752
745 write_lock_irqsave(&ha->hw_lock, flags); 753 write_lock_irqsave(&ha->hw_lock, flags);
746 754
@@ -752,9 +760,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
752 if ((start == -1UL) || 760 if ((start == -1UL) ||
753 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 761 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
754 write_unlock_irqrestore(&ha->hw_lock, flags); 762 write_unlock_irqrestore(&ha->hw_lock, flags);
755 qla_printk(KERN_ERR, ha, 763 ql_log(ql_log_fatal, vha, 0xb009,
756 "%s out of bound pci memory access. " 764 "%s out of bount memory "
757 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 765 "access, offset is 0x%llx.\n",
766 QLA2XXX_DRIVER_NAME, off);
758 return -1; 767 return -1;
759 } 768 }
760 769
@@ -855,15 +864,16 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha)
855{ 864{
856 long timeout = 0; 865 long timeout = 0;
857 long done = 0 ; 866 long done = 0 ;
867 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
858 868
859 while (done == 0) { 869 while (done == 0) {
860 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 870 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
861 done &= 4; 871 done &= 4;
862 timeout++; 872 timeout++;
863 if (timeout >= rom_max_timeout) { 873 if (timeout >= rom_max_timeout) {
864 DEBUG(qla_printk(KERN_INFO, ha, 874 ql_dbg(ql_dbg_p3p, vha, 0xb00a,
865 "%s: Timeout reached waiting for rom busy", 875 "%s: Timeout reached waiting for rom busy.\n",
866 QLA2XXX_DRIVER_NAME)); 876 QLA2XXX_DRIVER_NAME);
867 return -1; 877 return -1;
868 } 878 }
869 } 879 }
@@ -875,15 +885,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
875{ 885{
876 long timeout = 0; 886 long timeout = 0;
877 long done = 0 ; 887 long done = 0 ;
888 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
878 889
879 while (done == 0) { 890 while (done == 0) {
880 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 891 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
881 done &= 2; 892 done &= 2;
882 timeout++; 893 timeout++;
883 if (timeout >= rom_max_timeout) { 894 if (timeout >= rom_max_timeout) {
884 DEBUG(qla_printk(KERN_INFO, ha, 895 ql_dbg(ql_dbg_p3p, vha, 0xb00b,
885 "%s: Timeout reached waiting for rom done", 896 "%s: Timeout reached waiting for rom done.\n",
886 QLA2XXX_DRIVER_NAME)); 897 QLA2XXX_DRIVER_NAME);
887 return -1; 898 return -1;
888 } 899 }
889 } 900 }
@@ -893,15 +904,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
893static int 904static int
894qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 905qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
895{ 906{
907 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
908
896 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); 909 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
897 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 910 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
898 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); 911 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
899 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); 912 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
900 qla82xx_wait_rom_busy(ha); 913 qla82xx_wait_rom_busy(ha);
901 if (qla82xx_wait_rom_done(ha)) { 914 if (qla82xx_wait_rom_done(ha)) {
902 qla_printk(KERN_WARNING, ha, 915 ql_log(ql_log_fatal, vha, 0x00ba,
903 "%s: Error waiting for rom done\n", 916 "Error waiting for rom done.\n");
904 QLA2XXX_DRIVER_NAME);
905 return -1; 917 return -1;
906 } 918 }
907 /* Reset abyte_cnt and dummy_byte_cnt */ 919 /* Reset abyte_cnt and dummy_byte_cnt */
@@ -917,6 +929,7 @@ static int
917qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 929qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
918{ 930{
919 int ret, loops = 0; 931 int ret, loops = 0;
932 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
920 933
921 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 934 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
922 udelay(100); 935 udelay(100);
@@ -924,9 +937,8 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
924 loops++; 937 loops++;
925 } 938 }
926 if (loops >= 50000) { 939 if (loops >= 50000) {
927 qla_printk(KERN_INFO, ha, 940 ql_log(ql_log_fatal, vha, 0x00b9,
928 "%s: qla82xx_rom_lock failed\n", 941 "Failed to aquire SEM2 lock.\n");
929 QLA2XXX_DRIVER_NAME);
930 return -1; 942 return -1;
931 } 943 }
932 ret = qla82xx_do_rom_fast_read(ha, addr, valp); 944 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -937,11 +949,12 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
937static int 949static int
938qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) 950qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
939{ 951{
952 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
940 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); 953 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
941 qla82xx_wait_rom_busy(ha); 954 qla82xx_wait_rom_busy(ha);
942 if (qla82xx_wait_rom_done(ha)) { 955 if (qla82xx_wait_rom_done(ha)) {
943 qla_printk(KERN_WARNING, ha, 956 ql_log(ql_log_warn, vha, 0xb00c,
944 "Error waiting for rom done\n"); 957 "Error waiting for rom done.\n");
945 return -1; 958 return -1;
946 } 959 }
947 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); 960 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
@@ -955,6 +968,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
955 uint32_t done = 1 ; 968 uint32_t done = 1 ;
956 uint32_t val; 969 uint32_t val;
957 int ret = 0; 970 int ret = 0;
971 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
958 972
959 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); 973 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
960 while ((done != 0) && (ret == 0)) { 974 while ((done != 0) && (ret == 0)) {
@@ -964,8 +978,8 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
964 udelay(10); 978 udelay(10);
965 cond_resched(); 979 cond_resched();
966 if (timeout >= 50000) { 980 if (timeout >= 50000) {
967 qla_printk(KERN_WARNING, ha, 981 ql_log(ql_log_warn, vha, 0xb00d,
968 "Timeout reached waiting for write finish"); 982 "Timeout reached waiting for write finish.\n");
969 return -1; 983 return -1;
970 } 984 }
971 } 985 }
@@ -992,13 +1006,14 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
992static int 1006static int
993qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) 1007qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
994{ 1008{
1009 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
995 if (qla82xx_flash_set_write_enable(ha)) 1010 if (qla82xx_flash_set_write_enable(ha))
996 return -1; 1011 return -1;
997 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); 1012 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
998 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); 1013 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
999 if (qla82xx_wait_rom_done(ha)) { 1014 if (qla82xx_wait_rom_done(ha)) {
1000 qla_printk(KERN_WARNING, ha, 1015 ql_log(ql_log_warn, vha, 0xb00e,
1001 "Error waiting for rom done\n"); 1016 "Error waiting for rom done.\n");
1002 return -1; 1017 return -1;
1003 } 1018 }
1004 return qla82xx_flash_wait_write_finish(ha); 1019 return qla82xx_flash_wait_write_finish(ha);
@@ -1007,10 +1022,11 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
1007static int 1022static int
1008qla82xx_write_disable_flash(struct qla_hw_data *ha) 1023qla82xx_write_disable_flash(struct qla_hw_data *ha)
1009{ 1024{
1025 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1010 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); 1026 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1011 if (qla82xx_wait_rom_done(ha)) { 1027 if (qla82xx_wait_rom_done(ha)) {
1012 qla_printk(KERN_WARNING, ha, 1028 ql_log(ql_log_warn, vha, 0xb00f,
1013 "Error waiting for rom done\n"); 1029 "Error waiting for rom done.\n");
1014 return -1; 1030 return -1;
1015 } 1031 }
1016 return 0; 1032 return 0;
@@ -1020,13 +1036,16 @@ static int
1020ql82xx_rom_lock_d(struct qla_hw_data *ha) 1036ql82xx_rom_lock_d(struct qla_hw_data *ha)
1021{ 1037{
1022 int loops = 0; 1038 int loops = 0;
1039 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040
1023 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 1041 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1024 udelay(100); 1042 udelay(100);
1025 cond_resched(); 1043 cond_resched();
1026 loops++; 1044 loops++;
1027 } 1045 }
1028 if (loops >= 50000) { 1046 if (loops >= 50000) {
1029 qla_printk(KERN_WARNING, ha, "ROM lock failed\n"); 1047 ql_log(ql_log_warn, vha, 0xb010,
1048 "ROM lock failed.\n");
1030 return -1; 1049 return -1;
1031 } 1050 }
1032 return 0;; 1051 return 0;;
@@ -1037,10 +1056,12 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1037 uint32_t data) 1056 uint32_t data)
1038{ 1057{
1039 int ret = 0; 1058 int ret = 0;
1059 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040 1060
1041 ret = ql82xx_rom_lock_d(ha); 1061 ret = ql82xx_rom_lock_d(ha);
1042 if (ret < 0) { 1062 if (ret < 0) {
1043 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 1063 ql_log(ql_log_warn, vha, 0xb011,
1064 "ROM lock failed.\n");
1044 return ret; 1065 return ret;
1045 } 1066 }
1046 1067
@@ -1053,8 +1074,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1053 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); 1074 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1054 qla82xx_wait_rom_busy(ha); 1075 qla82xx_wait_rom_busy(ha);
1055 if (qla82xx_wait_rom_done(ha)) { 1076 if (qla82xx_wait_rom_done(ha)) {
1056 qla_printk(KERN_WARNING, ha, 1077 ql_log(ql_log_warn, vha, 0xb012,
1057 "Error waiting for rom done\n"); 1078 "Error waiting for rom done.\n");
1058 ret = -1; 1079 ret = -1;
1059 goto done_write; 1080 goto done_write;
1060 } 1081 }
@@ -1159,8 +1180,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1159 */ 1180 */
1160 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || 1181 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1161 qla82xx_rom_fast_read(ha, 4, &n) != 0) { 1182 qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1162 qla_printk(KERN_WARNING, ha, 1183 ql_log(ql_log_fatal, vha, 0x006e,
1163 "[ERROR] Reading crb_init area: n: %08x\n", n); 1184 "Error Reading crb_init area: n: %08x.\n", n);
1164 return -1; 1185 return -1;
1165 } 1186 }
1166 1187
@@ -1172,20 +1193,18 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1172 1193
1173 /* number of addr/value pair should not exceed 1024 enteries */ 1194 /* number of addr/value pair should not exceed 1024 enteries */
1174 if (n >= 1024) { 1195 if (n >= 1024) {
1175 qla_printk(KERN_WARNING, ha, 1196 ql_log(ql_log_fatal, vha, 0x0071,
1176 "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n", 1197 "Card flash not initialized:n=0x%x.\n", n);
1177 QLA2XXX_DRIVER_NAME, __func__, n);
1178 return -1; 1198 return -1;
1179 } 1199 }
1180 1200
1181 qla_printk(KERN_INFO, ha, 1201 ql_log(ql_log_info, vha, 0x0072,
1182 "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n); 1202 "%d CRB init values found in ROM.\n", n);
1183 1203
1184 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL); 1204 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1185 if (buf == NULL) { 1205 if (buf == NULL) {
1186 qla_printk(KERN_WARNING, ha, 1206 ql_log(ql_log_fatal, vha, 0x010c,
1187 "%s: [ERROR] Unable to malloc memory.\n", 1207 "Unable to allocate memory.\n");
1188 QLA2XXX_DRIVER_NAME);
1189 return -1; 1208 return -1;
1190 } 1209 }
1191 1210
@@ -1236,9 +1255,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1236 continue; 1255 continue;
1237 1256
1238 if (off == ADDR_ERROR) { 1257 if (off == ADDR_ERROR) {
1239 qla_printk(KERN_WARNING, ha, 1258 ql_log(ql_log_fatal, vha, 0x0116,
1240 "%s: [ERROR] Unknown addr: 0x%08lx\n", 1259 "Unknow addr: 0x%08lx.\n", buf[i].addr);
1241 QLA2XXX_DRIVER_NAME, buf[i].addr);
1242 continue; 1260 continue;
1243 } 1261 }
1244 1262
@@ -1370,7 +1388,7 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1370 if (j >= MAX_CTL_CHECK) { 1388 if (j >= MAX_CTL_CHECK) {
1371 if (printk_ratelimit()) 1389 if (printk_ratelimit())
1372 dev_err(&ha->pdev->dev, 1390 dev_err(&ha->pdev->dev,
1373 "failed to write through agent\n"); 1391 "failed to write through agent.\n");
1374 ret = -1; 1392 ret = -1;
1375 break; 1393 break;
1376 } 1394 }
@@ -1460,7 +1478,7 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1460 if (j >= MAX_CTL_CHECK) { 1478 if (j >= MAX_CTL_CHECK) {
1461 if (printk_ratelimit()) 1479 if (printk_ratelimit())
1462 dev_err(&ha->pdev->dev, 1480 dev_err(&ha->pdev->dev,
1463 "failed to read through agent\n"); 1481 "failed to read through agent.\n");
1464 break; 1482 break;
1465 } 1483 }
1466 1484
@@ -1633,17 +1651,15 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1633 uint32_t len = 0; 1651 uint32_t len = 0;
1634 1652
1635 if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { 1653 if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1636 qla_printk(KERN_WARNING, ha, 1654 ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
1637 "Failed to reserve selected regions (%s)\n", 1655 "Failed to reserver selected regions.\n");
1638 pci_name(ha->pdev));
1639 goto iospace_error_exit; 1656 goto iospace_error_exit;
1640 } 1657 }
1641 1658
1642 /* Use MMIO operations for all accesses. */ 1659 /* Use MMIO operations for all accesses. */
1643 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { 1660 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1644 qla_printk(KERN_ERR, ha, 1661 ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
1645 "region #0 not an MMIO resource (%s), aborting\n", 1662 "Region #0 not an MMIO resource, aborting.\n");
1646 pci_name(ha->pdev));
1647 goto iospace_error_exit; 1663 goto iospace_error_exit;
1648 } 1664 }
1649 1665
@@ -1651,9 +1667,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1651 ha->nx_pcibase = 1667 ha->nx_pcibase =
1652 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len); 1668 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1653 if (!ha->nx_pcibase) { 1669 if (!ha->nx_pcibase) {
1654 qla_printk(KERN_ERR, ha, 1670 ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
1655 "cannot remap pcibase MMIO (%s), aborting\n", 1671 "Cannot remap pcibase MMIO, aborting.\n");
1656 pci_name(ha->pdev));
1657 pci_release_regions(ha->pdev); 1672 pci_release_regions(ha->pdev);
1658 goto iospace_error_exit; 1673 goto iospace_error_exit;
1659 } 1674 }
@@ -1667,9 +1682,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1667 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) + 1682 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1668 (ha->pdev->devfn << 12)), 4); 1683 (ha->pdev->devfn << 12)), 4);
1669 if (!ha->nxdb_wr_ptr) { 1684 if (!ha->nxdb_wr_ptr) {
1670 qla_printk(KERN_ERR, ha, 1685 ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
1671 "cannot remap MMIO (%s), aborting\n", 1686 "Cannot remap MMIO, aborting.\n");
1672 pci_name(ha->pdev));
1673 pci_release_regions(ha->pdev); 1687 pci_release_regions(ha->pdev);
1674 goto iospace_error_exit; 1688 goto iospace_error_exit;
1675 } 1689 }
@@ -1687,6 +1701,16 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1687 1701
1688 ha->max_req_queues = ha->max_rsp_queues = 1; 1702 ha->max_req_queues = ha->max_rsp_queues = 1;
1689 ha->msix_count = ha->max_rsp_queues + 1; 1703 ha->msix_count = ha->max_rsp_queues + 1;
1704 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
1705 "nx_pci_base=%p iobase=%p "
1706 "max_req_queues=%d msix_count=%d.\n",
1707 ha->nx_pcibase, ha->iobase,
1708 ha->max_req_queues, ha->msix_count);
1709 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
1710 "nx_pci_base=%p iobase=%p "
1711 "max_req_queues=%d msix_count=%d.\n",
1712 ha->nx_pcibase, ha->iobase,
1713 ha->max_req_queues, ha->msix_count);
1690 return 0; 1714 return 0;
1691 1715
1692iospace_error_exit: 1716iospace_error_exit:
@@ -1712,6 +1736,9 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
1712 pci_set_master(ha->pdev); 1736 pci_set_master(ha->pdev);
1713 ret = pci_set_mwi(ha->pdev); 1737 ret = pci_set_mwi(ha->pdev);
1714 ha->chip_revision = ha->pdev->revision; 1738 ha->chip_revision = ha->pdev->revision;
1739 ql_dbg(ql_dbg_init, vha, 0x0043,
1740 "Chip revision:%ld.\n",
1741 ha->chip_revision);
1715 return 0; 1742 return 0;
1716} 1743}
1717 1744
@@ -1877,6 +1904,7 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1877{ 1904{
1878 u32 val = 0; 1905 u32 val = 0;
1879 int retries = 60; 1906 int retries = 60;
1907 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1880 1908
1881 do { 1909 do {
1882 read_lock(&ha->hw_lock); 1910 read_lock(&ha->hw_lock);
@@ -1892,15 +1920,15 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1892 default: 1920 default:
1893 break; 1921 break;
1894 } 1922 }
1895 qla_printk(KERN_WARNING, ha, 1923 ql_log(ql_log_info, vha, 0x00a8,
1896 "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n", 1924 "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
1897 val, retries); 1925 val, retries);
1898 1926
1899 msleep(500); 1927 msleep(500);
1900 1928
1901 } while (--retries); 1929 } while (--retries);
1902 1930
1903 qla_printk(KERN_INFO, ha, 1931 ql_log(ql_log_fatal, vha, 0x00a9,
1904 "Cmd Peg initialization failed: 0x%x.\n", val); 1932 "Cmd Peg initialization failed: 0x%x.\n", val);
1905 1933
1906 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); 1934 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1915,6 +1943,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1915{ 1943{
1916 u32 val = 0; 1944 u32 val = 0;
1917 int retries = 60; 1945 int retries = 60;
1946 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1918 1947
1919 do { 1948 do {
1920 read_lock(&ha->hw_lock); 1949 read_lock(&ha->hw_lock);
@@ -1930,17 +1959,16 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1930 default: 1959 default:
1931 break; 1960 break;
1932 } 1961 }
1933 1962 ql_log(ql_log_info, vha, 0x00ab,
1934 qla_printk(KERN_WARNING, ha, 1963 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
1935 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n", 1964 val, retries);
1936 val, retries);
1937 1965
1938 msleep(500); 1966 msleep(500);
1939 1967
1940 } while (--retries); 1968 } while (--retries);
1941 1969
1942 qla_printk(KERN_INFO, ha, 1970 ql_log(ql_log_fatal, vha, 0x00ac,
1943 "Rcv Peg initialization failed: 0x%x.\n", val); 1971 "Rcv Peg initializatin failed: 0x%x.\n", val);
1944 read_lock(&ha->hw_lock); 1972 read_lock(&ha->hw_lock);
1945 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); 1973 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1946 read_unlock(&ha->hw_lock); 1974 read_unlock(&ha->hw_lock);
@@ -1989,13 +2017,11 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1989 } 2017 }
1990 2018
1991 if (ha->mcp) { 2019 if (ha->mcp) {
1992 DEBUG3_11(printk(KERN_INFO "%s(%ld): " 2020 ql_dbg(ql_dbg_async, vha, 0x5052,
1993 "Got mailbox completion. cmd=%x.\n", 2021 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1994 __func__, vha->host_no, ha->mcp->mb[0]));
1995 } else { 2022 } else {
1996 qla_printk(KERN_INFO, ha, 2023 ql_dbg(ql_dbg_async, vha, 0x5053,
1997 "%s(%ld): MBX pointer ERROR!\n", 2024 "MBX pointer ERROR.\n");
1998 __func__, vha->host_no);
1999 } 2025 }
2000} 2026}
2001 2027
@@ -2019,13 +2045,13 @@ qla82xx_intr_handler(int irq, void *dev_id)
2019 int status = 0, status1 = 0; 2045 int status = 0, status1 = 0;
2020 unsigned long flags; 2046 unsigned long flags;
2021 unsigned long iter; 2047 unsigned long iter;
2022 uint32_t stat; 2048 uint32_t stat = 0;
2023 uint16_t mb[4]; 2049 uint16_t mb[4];
2024 2050
2025 rsp = (struct rsp_que *) dev_id; 2051 rsp = (struct rsp_que *) dev_id;
2026 if (!rsp) { 2052 if (!rsp) {
2027 printk(KERN_INFO 2053 printk(KERN_INFO
2028 "%s(): NULL response queue pointer\n", __func__); 2054 "%s(): NULL response queue pointer.\n", __func__);
2029 return IRQ_NONE; 2055 return IRQ_NONE;
2030 } 2056 }
2031 ha = rsp->hw; 2057 ha = rsp->hw;
@@ -2075,9 +2101,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
2075 qla24xx_process_response_queue(vha, rsp); 2101 qla24xx_process_response_queue(vha, rsp);
2076 break; 2102 break;
2077 default: 2103 default:
2078 DEBUG2(printk("scsi(%ld): " 2104 ql_dbg(ql_dbg_async, vha, 0x5054,
2079 " Unrecognized interrupt type (%d).\n", 2105 "Unrecognized interrupt type (%d).\n",
2080 vha->host_no, stat & 0xff)); 2106 stat & 0xff);
2081 break; 2107 break;
2082 } 2108 }
2083 } 2109 }
@@ -2089,8 +2115,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
2089 2115
2090#ifdef QL_DEBUG_LEVEL_17 2116#ifdef QL_DEBUG_LEVEL_17
2091 if (!irq && ha->flags.eeh_busy) 2117 if (!irq && ha->flags.eeh_busy)
2092 qla_printk(KERN_WARNING, ha, 2118 ql_log(ql_log_warn, vha, 0x503d,
2093 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2119 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2094 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2120 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2095#endif 2121#endif
2096 2122
@@ -2111,13 +2137,13 @@ qla82xx_msix_default(int irq, void *dev_id)
2111 struct device_reg_82xx __iomem *reg; 2137 struct device_reg_82xx __iomem *reg;
2112 int status = 0; 2138 int status = 0;
2113 unsigned long flags; 2139 unsigned long flags;
2114 uint32_t stat; 2140 uint32_t stat = 0;
2115 uint16_t mb[4]; 2141 uint16_t mb[4];
2116 2142
2117 rsp = (struct rsp_que *) dev_id; 2143 rsp = (struct rsp_que *) dev_id;
2118 if (!rsp) { 2144 if (!rsp) {
2119 printk(KERN_INFO 2145 printk(KERN_INFO
2120 "%s(): NULL response queue pointer\n", __func__); 2146 "%s(): NULL response queue pointer.\n", __func__);
2121 return IRQ_NONE; 2147 return IRQ_NONE;
2122 } 2148 }
2123 ha = rsp->hw; 2149 ha = rsp->hw;
@@ -2149,9 +2175,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2149 qla24xx_process_response_queue(vha, rsp); 2175 qla24xx_process_response_queue(vha, rsp);
2150 break; 2176 break;
2151 default: 2177 default:
2152 DEBUG2(printk("scsi(%ld): " 2178 ql_dbg(ql_dbg_async, vha, 0x5041,
2153 " Unrecognized interrupt type (%d).\n", 2179 "Unrecognized interrupt type (%d).\n",
2154 vha->host_no, stat & 0xff)); 2180 stat & 0xff);
2155 break; 2181 break;
2156 } 2182 }
2157 } 2183 }
@@ -2162,9 +2188,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2162 2188
2163#ifdef QL_DEBUG_LEVEL_17 2189#ifdef QL_DEBUG_LEVEL_17
2164 if (!irq && ha->flags.eeh_busy) 2190 if (!irq && ha->flags.eeh_busy)
2165 qla_printk(KERN_WARNING, ha, 2191 ql_log(ql_log_warn, vha, 0x5044,
2166 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2192 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2167 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2193 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2168#endif 2194#endif
2169 2195
2170 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2196 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
@@ -2186,7 +2212,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
2186 rsp = (struct rsp_que *) dev_id; 2212 rsp = (struct rsp_que *) dev_id;
2187 if (!rsp) { 2213 if (!rsp) {
2188 printk(KERN_INFO 2214 printk(KERN_INFO
2189 "%s(): NULL response queue pointer\n", __func__); 2215 "%s(): NULL response queue pointer.\n", __func__);
2190 return IRQ_NONE; 2216 return IRQ_NONE;
2191 } 2217 }
2192 2218
@@ -2215,7 +2241,7 @@ qla82xx_poll(int irq, void *dev_id)
2215 rsp = (struct rsp_que *) dev_id; 2241 rsp = (struct rsp_que *) dev_id;
2216 if (!rsp) { 2242 if (!rsp) {
2217 printk(KERN_INFO 2243 printk(KERN_INFO
2218 "%s(): NULL response queue pointer\n", __func__); 2244 "%s(): NULL response queue pointer.\n", __func__);
2219 return; 2245 return;
2220 } 2246 }
2221 ha = rsp->hw; 2247 ha = rsp->hw;
@@ -2245,9 +2271,9 @@ qla82xx_poll(int irq, void *dev_id)
2245 qla24xx_process_response_queue(vha, rsp); 2271 qla24xx_process_response_queue(vha, rsp);
2246 break; 2272 break;
2247 default: 2273 default:
2248 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2274 ql_dbg(ql_dbg_p3p, vha, 0xb013,
2249 "(%d).\n", 2275 "Unrecognized interrupt type (%d).\n",
2250 vha->host_no, stat & 0xff)); 2276 stat * 0xff);
2251 break; 2277 break;
2252 } 2278 }
2253 } 2279 }
@@ -2347,9 +2373,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
2347 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2373 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2348 } 2374 }
2349 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2375 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2350 qla_printk(KERN_INFO, ha, 2376 ql_log(ql_log_info, vha, 0x00bb,
2351 "%s(%ld):drv_state = 0x%x\n", 2377 "drv_state = 0x%x.\n", drv_state);
2352 __func__, vha->host_no, drv_state);
2353 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2378 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2354} 2379}
2355 2380
@@ -2392,8 +2417,8 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2392 struct qla_hw_data *ha = vha->hw; 2417 struct qla_hw_data *ha = vha->hw;
2393 2418
2394 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { 2419 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2395 qla_printk(KERN_ERR, ha, 2420 ql_log(ql_log_fatal, vha, 0x009f,
2396 "%s: Error during CRB Initialization\n", __func__); 2421 "Error during CRB initialization.\n");
2397 return QLA_FUNCTION_FAILED; 2422 return QLA_FUNCTION_FAILED;
2398 } 2423 }
2399 udelay(500); 2424 udelay(500);
@@ -2411,27 +2436,27 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2411 if (ql2xfwloadbin == 2) 2436 if (ql2xfwloadbin == 2)
2412 goto try_blob_fw; 2437 goto try_blob_fw;
2413 2438
2414 qla_printk(KERN_INFO, ha, 2439 ql_log(ql_log_info, vha, 0x00a0,
2415 "Attempting to load firmware from flash\n"); 2440 "Attempting to load firmware from flash.\n");
2416 2441
2417 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2442 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2418 qla_printk(KERN_ERR, ha, 2443 ql_log(ql_log_info, vha, 0x00a1,
2419 "Firmware loaded successfully from flash\n"); 2444 "Firmware loaded successully from flash.\n");
2420 return QLA_SUCCESS; 2445 return QLA_SUCCESS;
2421 } else { 2446 } else {
2422 qla_printk(KERN_ERR, ha, 2447 ql_log(ql_log_warn, vha, 0x0108,
2423 "Firmware load from flash failed\n"); 2448 "Firmware load from flash failed.\n");
2424 } 2449 }
2425 2450
2426try_blob_fw: 2451try_blob_fw:
2427 qla_printk(KERN_INFO, ha, 2452 ql_log(ql_log_info, vha, 0x00a2,
2428 "Attempting to load firmware from blob\n"); 2453 "Attempting to load firmware from blob.\n");
2429 2454
2430 /* Load firmware blob. */ 2455 /* Load firmware blob. */
2431 blob = ha->hablob = qla2x00_request_firmware(vha); 2456 blob = ha->hablob = qla2x00_request_firmware(vha);
2432 if (!blob) { 2457 if (!blob) {
2433 qla_printk(KERN_ERR, ha, 2458 ql_log(ql_log_fatal, vha, 0x00a3,
2434 "Firmware image not present.\n"); 2459 "Firmware image not preset.\n");
2435 goto fw_load_failed; 2460 goto fw_load_failed;
2436 } 2461 }
2437 2462
@@ -2441,20 +2466,19 @@ try_blob_fw:
2441 /* Fallback to URI format */ 2466 /* Fallback to URI format */
2442 if (qla82xx_validate_firmware_blob(vha, 2467 if (qla82xx_validate_firmware_blob(vha,
2443 QLA82XX_UNIFIED_ROMIMAGE)) { 2468 QLA82XX_UNIFIED_ROMIMAGE)) {
2444 qla_printk(KERN_ERR, ha, 2469 ql_log(ql_log_fatal, vha, 0x00a4,
2445 "No valid firmware image found!!!"); 2470 "No valid firmware image found.\n");
2446 return QLA_FUNCTION_FAILED; 2471 return QLA_FUNCTION_FAILED;
2447 } 2472 }
2448 } 2473 }
2449 2474
2450 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { 2475 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2451 qla_printk(KERN_ERR, ha, 2476 ql_log(ql_log_info, vha, 0x00a5,
2452 "%s: Firmware loaded successfully " 2477 "Firmware loaded successfully from binary blob.\n");
2453 " from binary blob\n", __func__);
2454 return QLA_SUCCESS; 2478 return QLA_SUCCESS;
2455 } else { 2479 } else {
2456 qla_printk(KERN_ERR, ha, 2480 ql_log(ql_log_fatal, vha, 0x00a6,
2457 "Firmware load failed from binary blob\n"); 2481 "Firmware load failed for binary blob.\n");
2458 blob->fw = NULL; 2482 blob->fw = NULL;
2459 blob = NULL; 2483 blob = NULL;
2460 goto fw_load_failed; 2484 goto fw_load_failed;
@@ -2486,15 +2510,15 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2486 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); 2510 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2487 2511
2488 if (qla82xx_load_fw(vha) != QLA_SUCCESS) { 2512 if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2489 qla_printk(KERN_INFO, ha, 2513 ql_log(ql_log_fatal, vha, 0x00a7,
2490 "%s: Error trying to start fw!\n", __func__); 2514 "Error trying to start fw.\n");
2491 return QLA_FUNCTION_FAILED; 2515 return QLA_FUNCTION_FAILED;
2492 } 2516 }
2493 2517
2494 /* Handshake with the card before we register the devices. */ 2518 /* Handshake with the card before we register the devices. */
2495 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { 2519 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2496 qla_printk(KERN_INFO, ha, 2520 ql_log(ql_log_fatal, vha, 0x00aa,
2497 "%s: Error during card handshake!\n", __func__); 2521 "Error during card handshake.\n");
2498 return QLA_FUNCTION_FAILED; 2522 return QLA_FUNCTION_FAILED;
2499 } 2523 }
2500 2524
@@ -2663,8 +2687,11 @@ qla82xx_start_scsi(srb_t *sp)
2663 /* Send marker if required */ 2687 /* Send marker if required */
2664 if (vha->marker_needed != 0) { 2688 if (vha->marker_needed != 0) {
2665 if (qla2x00_marker(vha, req, 2689 if (qla2x00_marker(vha, req,
2666 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 2690 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2691 ql_log(ql_log_warn, vha, 0x300c,
2692 "qla2x00_marker failed for cmd=%p.\n", cmd);
2667 return QLA_FUNCTION_FAILED; 2693 return QLA_FUNCTION_FAILED;
2694 }
2668 vha->marker_needed = 0; 2695 vha->marker_needed = 0;
2669 } 2696 }
2670 2697
@@ -2701,8 +2728,13 @@ qla82xx_start_scsi(srb_t *sp)
2701 uint16_t i; 2728 uint16_t i;
2702 2729
2703 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds); 2730 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2704 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) 2731 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2732 ql_dbg(ql_dbg_io, vha, 0x300d,
2733 "Num of DSD list %d is than %d for cmd=%p.\n",
2734 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2735 cmd);
2705 goto queuing_error; 2736 goto queuing_error;
2737 }
2706 2738
2707 if (more_dsd_lists <= ha->gbl_dsd_avail) 2739 if (more_dsd_lists <= ha->gbl_dsd_avail)
2708 goto sufficient_dsds; 2740 goto sufficient_dsds;
@@ -2711,13 +2743,20 @@ qla82xx_start_scsi(srb_t *sp)
2711 2743
2712 for (i = 0; i < more_dsd_lists; i++) { 2744 for (i = 0; i < more_dsd_lists; i++) {
2713 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 2745 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2714 if (!dsd_ptr) 2746 if (!dsd_ptr) {
2747 ql_log(ql_log_fatal, vha, 0x300e,
2748 "Failed to allocate memory for dsd_dma "
2749 "for cmd=%p.\n", cmd);
2715 goto queuing_error; 2750 goto queuing_error;
2751 }
2716 2752
2717 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 2753 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2718 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 2754 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2719 if (!dsd_ptr->dsd_addr) { 2755 if (!dsd_ptr->dsd_addr) {
2720 kfree(dsd_ptr); 2756 kfree(dsd_ptr);
2757 ql_log(ql_log_fatal, vha, 0x300f,
2758 "Failed to allocate memory for dsd_addr "
2759 "for cmd=%p.\n", cmd);
2721 goto queuing_error; 2760 goto queuing_error;
2722 } 2761 }
2723 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 2762 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
@@ -2742,17 +2781,16 @@ sufficient_dsds:
2742 2781
2743 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2782 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2744 if (!sp->ctx) { 2783 if (!sp->ctx) {
2745 DEBUG(printk(KERN_INFO 2784 ql_log(ql_log_fatal, vha, 0x3010,
2746 "%s(%ld): failed to allocate" 2785 "Failed to allocate ctx for cmd=%p.\n", cmd);
2747 " ctx.\n", __func__, vha->host_no));
2748 goto queuing_error; 2786 goto queuing_error;
2749 } 2787 }
2750 memset(ctx, 0, sizeof(struct ct6_dsd)); 2788 memset(ctx, 0, sizeof(struct ct6_dsd));
2751 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2789 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2752 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2790 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2753 if (!ctx->fcp_cmnd) { 2791 if (!ctx->fcp_cmnd) {
2754 DEBUG2_3(printk("%s(%ld): failed to allocate" 2792 ql_log(ql_log_fatal, vha, 0x3011,
2755 " fcp_cmnd.\n", __func__, vha->host_no)); 2793 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2756 goto queuing_error_fcp_cmnd; 2794 goto queuing_error_fcp_cmnd;
2757 } 2795 }
2758 2796
@@ -2766,6 +2804,9 @@ sufficient_dsds:
2766 /* SCSI command bigger than 16 bytes must be 2804 /* SCSI command bigger than 16 bytes must be
2767 * multiple of 4 2805 * multiple of 4
2768 */ 2806 */
2807 ql_log(ql_log_warn, vha, 0x3012,
2808 "scsi cmd len %d not multiple of 4 "
2809 "for cmd=%p.\n", cmd->cmd_len, cmd);
2769 goto queuing_error_fcp_cmnd; 2810 goto queuing_error_fcp_cmnd;
2770 } 2811 }
2771 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 2812 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
@@ -2845,7 +2886,7 @@ sufficient_dsds:
2845 cmd_pkt->entry_status = (uint8_t) rsp->id; 2886 cmd_pkt->entry_status = (uint8_t) rsp->id;
2846 } else { 2887 } else {
2847 struct cmd_type_7 *cmd_pkt; 2888 struct cmd_type_7 *cmd_pkt;
2848 req_cnt = qla24xx_calc_iocbs(tot_dsds); 2889 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2849 if (req->cnt < (req_cnt + 2)) { 2890 if (req->cnt < (req_cnt + 2)) {
2850 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2891 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2851 &reg->req_q_out[0]); 2892 &reg->req_q_out[0]);
@@ -2979,8 +3020,8 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2979 /* Dword reads to flash. */ 3020 /* Dword reads to flash. */
2980 for (i = 0; i < length/4; i++, faddr += 4) { 3021 for (i = 0; i < length/4; i++, faddr += 4) {
2981 if (qla82xx_rom_fast_read(ha, faddr, &val)) { 3022 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2982 qla_printk(KERN_WARNING, ha, 3023 ql_log(ql_log_warn, vha, 0x0106,
2983 "Do ROM fast read failed\n"); 3024 "Do ROM fast read failed.\n");
2984 goto done_read; 3025 goto done_read;
2985 } 3026 }
2986 dwptr[i] = __constant_cpu_to_le32(val); 3027 dwptr[i] = __constant_cpu_to_le32(val);
@@ -2994,10 +3035,12 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
2994{ 3035{
2995 int ret; 3036 int ret;
2996 uint32_t val; 3037 uint32_t val;
3038 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2997 3039
2998 ret = ql82xx_rom_lock_d(ha); 3040 ret = ql82xx_rom_lock_d(ha);
2999 if (ret < 0) { 3041 if (ret < 0) {
3000 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3042 ql_log(ql_log_warn, vha, 0xb014,
3043 "ROM Lock failed.\n");
3001 return ret; 3044 return ret;
3002 } 3045 }
3003 3046
@@ -3013,7 +3056,8 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
3013 } 3056 }
3014 3057
3015 if (qla82xx_write_disable_flash(ha) != 0) 3058 if (qla82xx_write_disable_flash(ha) != 0)
3016 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3059 ql_log(ql_log_warn, vha, 0xb015,
3060 "Write disable failed.\n");
3017 3061
3018done_unprotect: 3062done_unprotect:
3019 qla82xx_rom_unlock(ha); 3063 qla82xx_rom_unlock(ha);
@@ -3025,10 +3069,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3025{ 3069{
3026 int ret; 3070 int ret;
3027 uint32_t val; 3071 uint32_t val;
3072 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3028 3073
3029 ret = ql82xx_rom_lock_d(ha); 3074 ret = ql82xx_rom_lock_d(ha);
3030 if (ret < 0) { 3075 if (ret < 0) {
3031 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3076 ql_log(ql_log_warn, vha, 0xb016,
3077 "ROM Lock failed.\n");
3032 return ret; 3078 return ret;
3033 } 3079 }
3034 3080
@@ -3040,10 +3086,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3040 /* LOCK all sectors */ 3086 /* LOCK all sectors */
3041 ret = qla82xx_write_status_reg(ha, val); 3087 ret = qla82xx_write_status_reg(ha, val);
3042 if (ret < 0) 3088 if (ret < 0)
3043 qla_printk(KERN_WARNING, ha, "Write status register failed\n"); 3089 ql_log(ql_log_warn, vha, 0xb017,
3090 "Write status register failed.\n");
3044 3091
3045 if (qla82xx_write_disable_flash(ha) != 0) 3092 if (qla82xx_write_disable_flash(ha) != 0)
3046 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3093 ql_log(ql_log_warn, vha, 0xb018,
3094 "Write disable failed.\n");
3047done_protect: 3095done_protect:
3048 qla82xx_rom_unlock(ha); 3096 qla82xx_rom_unlock(ha);
3049 return ret; 3097 return ret;
@@ -3053,10 +3101,12 @@ static int
3053qla82xx_erase_sector(struct qla_hw_data *ha, int addr) 3101qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3054{ 3102{
3055 int ret = 0; 3103 int ret = 0;
3104 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3056 3105
3057 ret = ql82xx_rom_lock_d(ha); 3106 ret = ql82xx_rom_lock_d(ha);
3058 if (ret < 0) { 3107 if (ret < 0) {
3059 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3108 ql_log(ql_log_warn, vha, 0xb019,
3109 "ROM Lock failed.\n");
3060 return ret; 3110 return ret;
3061 } 3111 }
3062 3112
@@ -3066,8 +3116,8 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3066 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE); 3116 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3067 3117
3068 if (qla82xx_wait_rom_done(ha)) { 3118 if (qla82xx_wait_rom_done(ha)) {
3069 qla_printk(KERN_WARNING, ha, 3119 ql_log(ql_log_warn, vha, 0xb01a,
3070 "Error waiting for rom done\n"); 3120 "Error waiting for rom done.\n");
3071 ret = -1; 3121 ret = -1;
3072 goto done; 3122 goto done;
3073 } 3123 }
@@ -3110,10 +3160,10 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3110 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 3160 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3111 &optrom_dma, GFP_KERNEL); 3161 &optrom_dma, GFP_KERNEL);
3112 if (!optrom) { 3162 if (!optrom) {
3113 qla_printk(KERN_DEBUG, ha, 3163 ql_log(ql_log_warn, vha, 0xb01b,
3114 "Unable to allocate memory for optrom " 3164 "Unable to allocate memory "
3115 "burst write (%x KB).\n", 3165 "for optron burst write (%x KB).\n",
3116 OPTROM_BURST_SIZE / 1024); 3166 OPTROM_BURST_SIZE / 1024);
3117 } 3167 }
3118 } 3168 }
3119 3169
@@ -3122,8 +3172,8 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3122 3172
3123 ret = qla82xx_unprotect_flash(ha); 3173 ret = qla82xx_unprotect_flash(ha);
3124 if (ret) { 3174 if (ret) {
3125 qla_printk(KERN_WARNING, ha, 3175 ql_log(ql_log_warn, vha, 0xb01c,
3126 "Unable to unprotect flash for update.\n"); 3176 "Unable to unprotect flash for update.\n");
3127 goto write_done; 3177 goto write_done;
3128 } 3178 }
3129 3179
@@ -3133,9 +3183,9 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3133 3183
3134 ret = qla82xx_erase_sector(ha, faddr); 3184 ret = qla82xx_erase_sector(ha, faddr);
3135 if (ret) { 3185 if (ret) {
3136 DEBUG9(qla_printk(KERN_ERR, ha, 3186 ql_log(ql_log_warn, vha, 0xb01d,
3137 "Unable to erase sector: " 3187 "Unable to erase sector: address=%x.\n",
3138 "address=%x.\n", faddr)); 3188 faddr);
3139 break; 3189 break;
3140 } 3190 }
3141 } 3191 }
@@ -3149,12 +3199,12 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3149 (ha->flash_data_off | faddr), 3199 (ha->flash_data_off | faddr),
3150 OPTROM_BURST_DWORDS); 3200 OPTROM_BURST_DWORDS);
3151 if (ret != QLA_SUCCESS) { 3201 if (ret != QLA_SUCCESS) {
3152 qla_printk(KERN_WARNING, ha, 3202 ql_log(ql_log_warn, vha, 0xb01e,
3153 "Unable to burst-write optrom segment " 3203 "Unable to burst-write optrom segment "
3154 "(%x/%x/%llx).\n", ret, 3204 "(%x/%x/%llx).\n", ret,
3155 (ha->flash_data_off | faddr), 3205 (ha->flash_data_off | faddr),
3156 (unsigned long long)optrom_dma); 3206 (unsigned long long)optrom_dma);
3157 qla_printk(KERN_WARNING, ha, 3207 ql_log(ql_log_warn, vha, 0xb01f,
3158 "Reverting to slow-write.\n"); 3208 "Reverting to slow-write.\n");
3159 3209
3160 dma_free_coherent(&ha->pdev->dev, 3210 dma_free_coherent(&ha->pdev->dev,
@@ -3171,16 +3221,16 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3171 ret = qla82xx_write_flash_dword(ha, faddr, 3221 ret = qla82xx_write_flash_dword(ha, faddr,
3172 cpu_to_le32(*dwptr)); 3222 cpu_to_le32(*dwptr));
3173 if (ret) { 3223 if (ret) {
3174 DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program" 3224 ql_dbg(ql_dbg_p3p, vha, 0xb020,
3175 "flash address=%x data=%x.\n", __func__, 3225 "Unable to program flash address=%x data=%x.\n",
3176 ha->host_no, faddr, *dwptr)); 3226 faddr, *dwptr);
3177 break; 3227 break;
3178 } 3228 }
3179 } 3229 }
3180 3230
3181 ret = qla82xx_protect_flash(ha); 3231 ret = qla82xx_protect_flash(ha);
3182 if (ret) 3232 if (ret)
3183 qla_printk(KERN_WARNING, ha, 3233 ql_log(ql_log_warn, vha, 0xb021,
3184 "Unable to protect flash after update.\n"); 3234 "Unable to protect flash after update.\n");
3185write_done: 3235write_done:
3186 if (optrom) 3236 if (optrom)
@@ -3244,9 +3294,12 @@ qla82xx_start_iocbs(srb_t *sp)
3244 3294
3245void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) 3295void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3246{ 3296{
3297 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3298
3247 if (qla82xx_rom_lock(ha)) 3299 if (qla82xx_rom_lock(ha))
3248 /* Someone else is holding the lock. */ 3300 /* Someone else is holding the lock. */
3249 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n"); 3301 ql_log(ql_log_info, vha, 0xb022,
3302 "Resetting rom_lock.\n");
3250 3303
3251 /* 3304 /*
3252 * Either we got the lock, or someone 3305 * Either we got the lock, or someone
@@ -3313,7 +3366,8 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3313 3366
3314dev_initialize: 3367dev_initialize:
3315 /* set to DEV_INITIALIZING */ 3368 /* set to DEV_INITIALIZING */
3316 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); 3369 ql_log(ql_log_info, vha, 0x009e,
3370 "HW State: INITIALIZING.\n");
3317 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); 3371 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3318 3372
3319 /* Driver that sets device state to initializating sets IDC version */ 3373 /* Driver that sets device state to initializating sets IDC version */
@@ -3324,14 +3378,16 @@ dev_initialize:
3324 qla82xx_idc_lock(ha); 3378 qla82xx_idc_lock(ha);
3325 3379
3326 if (rval != QLA_SUCCESS) { 3380 if (rval != QLA_SUCCESS) {
3327 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 3381 ql_log(ql_log_fatal, vha, 0x00ad,
3382 "HW State: FAILED.\n");
3328 qla82xx_clear_drv_active(ha); 3383 qla82xx_clear_drv_active(ha);
3329 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); 3384 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3330 return rval; 3385 return rval;
3331 } 3386 }
3332 3387
3333dev_ready: 3388dev_ready:
3334 qla_printk(KERN_INFO, ha, "HW State: READY\n"); 3389 ql_log(ql_log_info, vha, 0x00ae,
3390 "HW State: READY.\n");
3335 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 3391 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3336 3392
3337 return QLA_SUCCESS; 3393 return QLA_SUCCESS;
@@ -3376,15 +3432,15 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3376 /* quiescence timeout, other functions didn't ack 3432 /* quiescence timeout, other functions didn't ack
3377 * changing the state to DEV_READY 3433 * changing the state to DEV_READY
3378 */ 3434 */
3379 qla_printk(KERN_INFO, ha, 3435 ql_log(ql_log_info, vha, 0xb023,
3380 "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME); 3436 "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
3381 qla_printk(KERN_INFO, ha, 3437 ql_log(ql_log_info, vha, 0xb024,
3382 "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active, 3438 "DRV_ACTIVE:%d DRV_STATE:%d.\n",
3383 drv_state); 3439 drv_active, drv_state);
3384 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3440 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3385 QLA82XX_DEV_READY); 3441 QLA82XX_DEV_READY);
3386 qla_printk(KERN_INFO, ha, 3442 ql_log(ql_log_info, vha, 0xb025,
3387 "HW State: DEV_READY\n"); 3443 "HW State: DEV_READY.\n");
3388 qla82xx_idc_unlock(ha); 3444 qla82xx_idc_unlock(ha);
3389 qla2x00_perform_loop_resync(vha); 3445 qla2x00_perform_loop_resync(vha);
3390 qla82xx_idc_lock(ha); 3446 qla82xx_idc_lock(ha);
@@ -3404,7 +3460,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3404 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3460 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3405 /* everyone acked so set the state to DEV_QUIESCENCE */ 3461 /* everyone acked so set the state to DEV_QUIESCENCE */
3406 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { 3462 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3407 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n"); 3463 ql_log(ql_log_info, vha, 0xb026,
3464 "HW State: DEV_QUIESCENT.\n");
3408 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); 3465 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3409 } 3466 }
3410} 3467}
@@ -3441,7 +3498,8 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3441 struct qla_hw_data *ha = vha->hw; 3498 struct qla_hw_data *ha = vha->hw;
3442 3499
3443 /* Disable the board */ 3500 /* Disable the board */
3444 qla_printk(KERN_INFO, ha, "Disabling the board\n"); 3501 ql_log(ql_log_fatal, vha, 0x00b8,
3502 "Disabling the board.\n");
3445 3503
3446 qla82xx_idc_lock(ha); 3504 qla82xx_idc_lock(ha);
3447 qla82xx_clear_drv_active(ha); 3505 qla82xx_clear_drv_active(ha);
@@ -3492,8 +3550,8 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3492 3550
3493 while (drv_state != drv_active) { 3551 while (drv_state != drv_active) {
3494 if (time_after_eq(jiffies, reset_timeout)) { 3552 if (time_after_eq(jiffies, reset_timeout)) {
3495 qla_printk(KERN_INFO, ha, 3553 ql_log(ql_log_warn, vha, 0x00b5,
3496 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME); 3554 "Reset timeout.\n");
3497 break; 3555 break;
3498 } 3556 }
3499 qla82xx_idc_unlock(ha); 3557 qla82xx_idc_unlock(ha);
@@ -3504,12 +3562,15 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3504 } 3562 }
3505 3563
3506 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3564 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3507 qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, 3565 ql_log(ql_log_info, vha, 0x00b6,
3508 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3566 "Device state is 0x%x = %s.\n",
3567 dev_state,
3568 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3509 3569
3510 /* Force to DEV_COLD unless someone else is starting a reset */ 3570 /* Force to DEV_COLD unless someone else is starting a reset */
3511 if (dev_state != QLA82XX_DEV_INITIALIZING) { 3571 if (dev_state != QLA82XX_DEV_INITIALIZING) {
3512 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 3572 ql_log(ql_log_info, vha, 0x00b7,
3573 "HW State: COLD/RE-INIT.\n");
3513 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3574 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3514 } 3575 }
3515} 3576}
@@ -3523,8 +3584,12 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3523 fw_heartbeat_counter = qla82xx_rd_32(vha->hw, 3584 fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3524 QLA82XX_PEG_ALIVE_COUNTER); 3585 QLA82XX_PEG_ALIVE_COUNTER);
3525 /* all 0xff, assume AER/EEH in progress, ignore */ 3586 /* all 0xff, assume AER/EEH in progress, ignore */
3526 if (fw_heartbeat_counter == 0xffffffff) 3587 if (fw_heartbeat_counter == 0xffffffff) {
3588 ql_dbg(ql_dbg_timer, vha, 0x6003,
3589 "FW heartbeat counter is 0xffffffff, "
3590 "returning status=%d.\n", status);
3527 return status; 3591 return status;
3592 }
3528 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { 3593 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3529 vha->seconds_since_last_heartbeat++; 3594 vha->seconds_since_last_heartbeat++;
3530 /* FW not alive after 2 seconds */ 3595 /* FW not alive after 2 seconds */
@@ -3535,6 +3600,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3535 } else 3600 } else
3536 vha->seconds_since_last_heartbeat = 0; 3601 vha->seconds_since_last_heartbeat = 0;
3537 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3602 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3603 if (status)
3604 ql_dbg(ql_dbg_timer, vha, 0x6004,
3605 "Returning status=%d.\n", status);
3538 return status; 3606 return status;
3539} 3607}
3540 3608
@@ -3565,8 +3633,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3565 3633
3566 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3634 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3567 old_dev_state = dev_state; 3635 old_dev_state = dev_state;
3568 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 3636 ql_log(ql_log_info, vha, 0x009b,
3569 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3637 "Device state is 0x%x = %s.\n",
3638 dev_state,
3639 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3570 3640
3571 /* wait for 30 seconds for device to go ready */ 3641 /* wait for 30 seconds for device to go ready */
3572 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3642 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3574,9 +3644,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3574 while (1) { 3644 while (1) {
3575 3645
3576 if (time_after_eq(jiffies, dev_init_timeout)) { 3646 if (time_after_eq(jiffies, dev_init_timeout)) {
3577 DEBUG(qla_printk(KERN_INFO, ha, 3647 ql_log(ql_log_fatal, vha, 0x009c,
3578 "%s: device init failed!\n", 3648 "Device init failed.\n");
3579 QLA2XXX_DRIVER_NAME));
3580 rval = QLA_FUNCTION_FAILED; 3649 rval = QLA_FUNCTION_FAILED;
3581 break; 3650 break;
3582 } 3651 }
@@ -3586,10 +3655,11 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3586 old_dev_state = dev_state; 3655 old_dev_state = dev_state;
3587 } 3656 }
3588 if (loopcount < 5) { 3657 if (loopcount < 5) {
3589 qla_printk(KERN_INFO, ha, 3658 ql_log(ql_log_info, vha, 0x009d,
3590 "2:Device state is 0x%x = %s\n", dev_state, 3659 "Device state is 0x%x = %s.\n",
3591 dev_state < MAX_STATES ? 3660 dev_state,
3592 qdev_state[dev_state] : "Unknown"); 3661 dev_state < MAX_STATES ? qdev_state[dev_state] :
3662 "Unknown");
3593 } 3663 }
3594 3664
3595 switch (dev_state) { 3665 switch (dev_state) {
@@ -3656,29 +3726,26 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3656 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3726 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3657 if (dev_state == QLA82XX_DEV_NEED_RESET && 3727 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3658 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3728 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3659 qla_printk(KERN_WARNING, ha, 3729 ql_log(ql_log_warn, vha, 0x6001,
3660 "scsi(%ld) %s: Adapter reset needed!\n", 3730 "Adapter reset needed.\n");
3661 vha->host_no, __func__);
3662 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3731 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3663 qla2xxx_wake_dpc(vha); 3732 qla2xxx_wake_dpc(vha);
3664 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3733 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3665 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3734 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3666 DEBUG(qla_printk(KERN_INFO, ha, 3735 ql_log(ql_log_warn, vha, 0x6002,
3667 "scsi(%ld) %s - detected quiescence needed\n", 3736 "Quiescent needed.\n");
3668 vha->host_no, __func__));
3669 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3737 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3670 qla2xxx_wake_dpc(vha); 3738 qla2xxx_wake_dpc(vha);
3671 } else { 3739 } else {
3672 if (qla82xx_check_fw_alive(vha)) { 3740 if (qla82xx_check_fw_alive(vha)) {
3673 halt_status = qla82xx_rd_32(ha, 3741 halt_status = qla82xx_rd_32(ha,
3674 QLA82XX_PEG_HALT_STATUS1); 3742 QLA82XX_PEG_HALT_STATUS1);
3675 qla_printk(KERN_INFO, ha, 3743 ql_dbg(ql_dbg_timer, vha, 0x6005,
3676 "scsi(%ld): %s, Dumping hw/fw registers:\n " 3744 "dumping hw/fw registers:.\n "
3677 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n " 3745 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
3678 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n " 3746 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
3679 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n " 3747 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
3680 " PEG_NET_4_PC: 0x%x\n", 3748 " PEG_NET_4_PC: 0x%x.\n", halt_status,
3681 vha->host_no, __func__, halt_status,
3682 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), 3749 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3683 qla82xx_rd_32(ha, 3750 qla82xx_rd_32(ha,
3684 QLA82XX_CRB_PEG_NET_0 + 0x3c), 3751 QLA82XX_CRB_PEG_NET_0 + 0x3c),
@@ -3694,9 +3761,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3694 set_bit(ISP_UNRECOVERABLE, 3761 set_bit(ISP_UNRECOVERABLE,
3695 &vha->dpc_flags); 3762 &vha->dpc_flags);
3696 } else { 3763 } else {
3697 qla_printk(KERN_INFO, ha, 3764 ql_log(ql_log_info, vha, 0x6006,
3698 "scsi(%ld): %s - detect abort needed\n", 3765 "Detect abort needed.\n");
3699 vha->host_no, __func__);
3700 set_bit(ISP_ABORT_NEEDED, 3766 set_bit(ISP_ABORT_NEEDED,
3701 &vha->dpc_flags); 3767 &vha->dpc_flags);
3702 } 3768 }
@@ -3704,10 +3770,10 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3704 ha->flags.isp82xx_fw_hung = 1; 3770 ha->flags.isp82xx_fw_hung = 1;
3705 if (ha->flags.mbox_busy) { 3771 if (ha->flags.mbox_busy) {
3706 ha->flags.mbox_int = 1; 3772 ha->flags.mbox_int = 1;
3707 DEBUG2(qla_printk(KERN_ERR, ha, 3773 ql_log(ql_log_warn, vha, 0x6007,
3708 "scsi(%ld) Due to fw hung, doing " 3774 "Due to FW hung, doing "
3709 "premature completion of mbx " 3775 "premature completion of mbx "
3710 "command\n", vha->host_no)); 3776 "command.\n");
3711 if (test_bit(MBX_INTR_WAIT, 3777 if (test_bit(MBX_INTR_WAIT,
3712 &ha->mbx_cmd_flags)) 3778 &ha->mbx_cmd_flags))
3713 complete(&ha->mbx_intr_comp); 3779 complete(&ha->mbx_intr_comp);
@@ -3742,9 +3808,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3742 uint32_t dev_state; 3808 uint32_t dev_state;
3743 3809
3744 if (vha->device_flags & DFLG_DEV_FAILED) { 3810 if (vha->device_flags & DFLG_DEV_FAILED) {
3745 qla_printk(KERN_WARNING, ha, 3811 ql_log(ql_log_warn, vha, 0x8024,
3746 "%s(%ld): Device in failed state, " 3812 "Device in failed state, exiting.\n");
3747 "Exiting.\n", __func__, vha->host_no);
3748 return QLA_SUCCESS; 3813 return QLA_SUCCESS;
3749 } 3814 }
3750 ha->flags.isp82xx_reset_hdlr_active = 1; 3815 ha->flags.isp82xx_reset_hdlr_active = 1;
@@ -3752,13 +3817,14 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3752 qla82xx_idc_lock(ha); 3817 qla82xx_idc_lock(ha);
3753 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3818 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3754 if (dev_state == QLA82XX_DEV_READY) { 3819 if (dev_state == QLA82XX_DEV_READY) {
3755 qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 3820 ql_log(ql_log_info, vha, 0x8025,
3821 "HW State: NEED RESET.\n");
3756 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3822 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3757 QLA82XX_DEV_NEED_RESET); 3823 QLA82XX_DEV_NEED_RESET);
3758 } else 3824 } else
3759 qla_printk(KERN_INFO, ha, "HW State: %s\n", 3825 ql_log(ql_log_info, vha, 0x8026,
3760 dev_state < MAX_STATES ? 3826 "Hw State: %s.\n", dev_state < MAX_STATES ?
3761 qdev_state[dev_state] : "Unknown"); 3827 qdev_state[dev_state] : "Unknown");
3762 qla82xx_idc_unlock(ha); 3828 qla82xx_idc_unlock(ha);
3763 3829
3764 rval = qla82xx_device_state_handler(vha); 3830 rval = qla82xx_device_state_handler(vha);
@@ -3777,9 +3843,9 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3777 vha->flags.online = 1; 3843 vha->flags.online = 1;
3778 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 3844 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3779 if (ha->isp_abort_cnt == 0) { 3845 if (ha->isp_abort_cnt == 0) {
3780 qla_printk(KERN_WARNING, ha, 3846 ql_log(ql_log_warn, vha, 0x8027,
3781 "ISP error recovery failed - " 3847 "ISP error recover failed - board "
3782 "board disabled\n"); 3848 "disabled.\n");
3783 /* 3849 /*
3784 * The next call disables the board 3850 * The next call disables the board
3785 * completely. 3851 * completely.
@@ -3791,16 +3857,16 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3791 rval = QLA_SUCCESS; 3857 rval = QLA_SUCCESS;
3792 } else { /* schedule another ISP abort */ 3858 } else { /* schedule another ISP abort */
3793 ha->isp_abort_cnt--; 3859 ha->isp_abort_cnt--;
3794 DEBUG(qla_printk(KERN_INFO, ha, 3860 ql_log(ql_log_warn, vha, 0x8036,
3795 "qla%ld: ISP abort - retry remaining %d\n", 3861 "ISP abort - retry remaining %d.\n",
3796 vha->host_no, ha->isp_abort_cnt)); 3862 ha->isp_abort_cnt);
3797 rval = QLA_FUNCTION_FAILED; 3863 rval = QLA_FUNCTION_FAILED;
3798 } 3864 }
3799 } else { 3865 } else {
3800 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3866 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3801 DEBUG(qla_printk(KERN_INFO, ha, 3867 ql_dbg(ql_dbg_taskm, vha, 0x8029,
3802 "(%ld): ISP error recovery - retrying (%d) " 3868 "ISP error recovery - retrying (%d) more times.\n",
3803 "more times\n", vha->host_no, ha->isp_abort_cnt)); 3869 ha->isp_abort_cnt);
3804 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3870 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3805 rval = QLA_FUNCTION_FAILED; 3871 rval = QLA_FUNCTION_FAILED;
3806 } 3872 }
@@ -3872,8 +3938,8 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3872 break; 3938 break;
3873 } 3939 }
3874 } 3940 }
3875 DEBUG2(printk(KERN_INFO 3941 ql_dbg(ql_dbg_p3p, vha, 0xb027,
3876 "%s status=%d\n", __func__, status)); 3942 "%s status=%d.\n", status);
3877 3943
3878 return status; 3944 return status;
3879} 3945}
@@ -3902,6 +3968,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3902 } 3968 }
3903 } 3969 }
3904 } 3970 }
3971 ql_dbg(ql_dbg_init, vha, 0x00b0,
3972 "Entered %s fw_hung=%d.\n",
3973 __func__, ha->flags.isp82xx_fw_hung);
3905 3974
3906 /* Abort all commands gracefully if fw NOT hung */ 3975 /* Abort all commands gracefully if fw NOT hung */
3907 if (!ha->flags.isp82xx_fw_hung) { 3976 if (!ha->flags.isp82xx_fw_hung) {
@@ -3922,13 +3991,13 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3922 spin_unlock_irqrestore( 3991 spin_unlock_irqrestore(
3923 &ha->hardware_lock, flags); 3992 &ha->hardware_lock, flags);
3924 if (ha->isp_ops->abort_command(sp)) { 3993 if (ha->isp_ops->abort_command(sp)) {
3925 qla_printk(KERN_INFO, ha, 3994 ql_log(ql_log_info, vha,
3926 "scsi(%ld): mbx abort command failed in %s\n", 3995 0x00b1,
3927 vha->host_no, __func__); 3996 "mbx abort failed.\n");
3928 } else { 3997 } else {
3929 qla_printk(KERN_INFO, ha, 3998 ql_log(ql_log_info, vha,
3930 "scsi(%ld): mbx abort command success in %s\n", 3999 0x00b2,
3931 vha->host_no, __func__); 4000 "mbx abort success.\n");
3932 } 4001 }
3933 spin_lock_irqsave(&ha->hardware_lock, flags); 4002 spin_lock_irqsave(&ha->hardware_lock, flags);
3934 } 4003 }
@@ -3940,8 +4009,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3940 /* Wait for pending cmds (physical and virtual) to complete */ 4009 /* Wait for pending cmds (physical and virtual) to complete */
3941 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 4010 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3942 WAIT_HOST) == QLA_SUCCESS) { 4011 WAIT_HOST) == QLA_SUCCESS) {
3943 DEBUG2(qla_printk(KERN_INFO, ha, 4012 ql_dbg(ql_dbg_init, vha, 0x00b3,
3944 "Done wait for pending commands\n")); 4013 "Done wait for "
4014 "pending commands.\n");
3945 } 4015 }
3946 } 4016 }
3947} 4017}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f461925a9dfc..e02df276804e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -35,6 +35,10 @@ static struct kmem_cache *srb_cachep;
35 * CT6 CTX allocation cache 35 * CT6 CTX allocation cache
36 */ 36 */
37static struct kmem_cache *ctx_cachep; 37static struct kmem_cache *ctx_cachep;
38/*
39 * error level for logging
40 */
41int ql_errlev = ql_log_all;
38 42
39int ql2xlogintimeout = 20; 43int ql2xlogintimeout = 20;
40module_param(ql2xlogintimeout, int, S_IRUGO); 44module_param(ql2xlogintimeout, int, S_IRUGO);
@@ -69,8 +73,17 @@ MODULE_PARM_DESC(ql2xallocfwdump,
69int ql2xextended_error_logging; 73int ql2xextended_error_logging;
70module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); 74module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
71MODULE_PARM_DESC(ql2xextended_error_logging, 75MODULE_PARM_DESC(ql2xextended_error_logging,
72 "Option to enable extended error logging, " 76 "Option to enable extended error logging,\n"
73 "Default is 0 - no logging. 1 - log errors."); 77 "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
78 "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
79 "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
80 "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
81 "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
82 "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
83 "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
84 "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
85 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
86 "\t\tDo LOGICAL OR of the value to enable more than one level");
74 87
75int ql2xshiftctondsd = 6; 88int ql2xshiftctondsd = 6;
76module_param(ql2xshiftctondsd, int, S_IRUGO); 89module_param(ql2xshiftctondsd, int, S_IRUGO);
@@ -128,8 +141,8 @@ MODULE_PARM_DESC(ql2xmultique_tag,
128int ql2xfwloadbin; 141int ql2xfwloadbin;
129module_param(ql2xfwloadbin, int, S_IRUGO); 142module_param(ql2xfwloadbin, int, S_IRUGO);
130MODULE_PARM_DESC(ql2xfwloadbin, 143MODULE_PARM_DESC(ql2xfwloadbin,
131 "Option to specify location from which to load ISP firmware:\n" 144 "Option to specify location from which to load ISP firmware:.\n"
132 " 2 -- load firmware via the request_firmware() (hotplug)\n" 145 " 2 -- load firmware via the request_firmware() (hotplug).\n"
133 " interface.\n" 146 " interface.\n"
134 " 1 -- load firmware from flash.\n" 147 " 1 -- load firmware from flash.\n"
135 " 0 -- use default semantics.\n"); 148 " 0 -- use default semantics.\n");
@@ -143,7 +156,7 @@ MODULE_PARM_DESC(ql2xetsenable,
143int ql2xdbwr = 1; 156int ql2xdbwr = 1;
144module_param(ql2xdbwr, int, S_IRUGO); 157module_param(ql2xdbwr, int, S_IRUGO);
145MODULE_PARM_DESC(ql2xdbwr, 158MODULE_PARM_DESC(ql2xdbwr,
146 "Option to specify scheme for request queue posting\n" 159 "Option to specify scheme for request queue posting.\n"
147 " 0 -- Regular doorbell.\n" 160 " 0 -- Regular doorbell.\n"
148 " 1 -- CAMRAM doorbell (faster).\n"); 161 " 1 -- CAMRAM doorbell (faster).\n");
149 162
@@ -168,7 +181,7 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
168int ql2xdontresethba; 181int ql2xdontresethba;
169module_param(ql2xdontresethba, int, S_IRUGO); 182module_param(ql2xdontresethba, int, S_IRUGO);
170MODULE_PARM_DESC(ql2xdontresethba, 183MODULE_PARM_DESC(ql2xdontresethba,
171 "Option to specify reset behaviour\n" 184 "Option to specify reset behaviour.\n"
172 " 0 (Default) -- Reset on failure.\n" 185 " 0 (Default) -- Reset on failure.\n"
173 " 1 -- Do not reset on failure.\n"); 186 " 1 -- Do not reset on failure.\n");
174 187
@@ -247,8 +260,11 @@ static inline void
247qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 260qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
248{ 261{
249 /* Currently used for 82XX only. */ 262 /* Currently used for 82XX only. */
250 if (vha->device_flags & DFLG_DEV_FAILED) 263 if (vha->device_flags & DFLG_DEV_FAILED) {
264 ql_dbg(ql_dbg_timer, vha, 0x600d,
265 "Device in a failed state, returning.\n");
251 return; 266 return;
267 }
252 268
253 mod_timer(&vha->timer, jiffies + interval * HZ); 269 mod_timer(&vha->timer, jiffies + interval * HZ);
254} 270}
@@ -273,19 +289,20 @@ static void qla2x00_sp_free_dma(srb_t *);
273/* -------------------------------------------------------------------------- */ 289/* -------------------------------------------------------------------------- */
274static int qla2x00_alloc_queues(struct qla_hw_data *ha) 290static int qla2x00_alloc_queues(struct qla_hw_data *ha)
275{ 291{
292 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
276 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, 293 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
277 GFP_KERNEL); 294 GFP_KERNEL);
278 if (!ha->req_q_map) { 295 if (!ha->req_q_map) {
279 qla_printk(KERN_WARNING, ha, 296 ql_log(ql_log_fatal, vha, 0x003b,
280 "Unable to allocate memory for request queue ptrs\n"); 297 "Unable to allocate memory for request queue ptrs.\n");
281 goto fail_req_map; 298 goto fail_req_map;
282 } 299 }
283 300
284 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, 301 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
285 GFP_KERNEL); 302 GFP_KERNEL);
286 if (!ha->rsp_q_map) { 303 if (!ha->rsp_q_map) {
287 qla_printk(KERN_WARNING, ha, 304 ql_log(ql_log_fatal, vha, 0x003c,
288 "Unable to allocate memory for response queue ptrs\n"); 305 "Unable to allocate memory for response queue ptrs.\n");
289 goto fail_rsp_map; 306 goto fail_rsp_map;
290 } 307 }
291 set_bit(0, ha->rsp_qid_map); 308 set_bit(0, ha->rsp_qid_map);
@@ -349,8 +366,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
349 struct qla_hw_data *ha = vha->hw; 366 struct qla_hw_data *ha = vha->hw;
350 367
351 if (!(ha->fw_attributes & BIT_6)) { 368 if (!(ha->fw_attributes & BIT_6)) {
352 qla_printk(KERN_INFO, ha, 369 ql_log(ql_log_warn, vha, 0x00d8,
353 "Firmware is not multi-queue capable\n"); 370 "Firmware is not multi-queue capable.\n");
354 goto fail; 371 goto fail;
355 } 372 }
356 if (ql2xmultique_tag) { 373 if (ql2xmultique_tag) {
@@ -359,8 +376,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
359 req = qla25xx_create_req_que(ha, options, 0, 0, -1, 376 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
360 QLA_DEFAULT_QUE_QOS); 377 QLA_DEFAULT_QUE_QOS);
361 if (!req) { 378 if (!req) {
362 qla_printk(KERN_WARNING, ha, 379 ql_log(ql_log_warn, vha, 0x00e0,
363 "Can't create request queue\n"); 380 "Failed to create request queue.\n");
364 goto fail; 381 goto fail;
365 } 382 }
366 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); 383 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
@@ -369,17 +386,20 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
369 for (ques = 1; ques < ha->max_rsp_queues; ques++) { 386 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
370 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req); 387 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
371 if (!ret) { 388 if (!ret) {
372 qla_printk(KERN_WARNING, ha, 389 ql_log(ql_log_warn, vha, 0x00e8,
373 "Response Queue create failed\n"); 390 "Failed to create response queue.\n");
374 goto fail2; 391 goto fail2;
375 } 392 }
376 } 393 }
377 ha->flags.cpu_affinity_enabled = 1; 394 ha->flags.cpu_affinity_enabled = 1;
378 395 ql_dbg(ql_dbg_multiq, vha, 0xc007,
379 DEBUG2(qla_printk(KERN_INFO, ha, 396 "CPU affinity mode enalbed, "
380 "CPU affinity mode enabled, no. of response" 397 "no. of response queues:%d no. of request queues:%d.\n",
381 " queues:%d, no. of request queues:%d\n", 398 ha->max_rsp_queues, ha->max_req_queues);
382 ha->max_rsp_queues, ha->max_req_queues)); 399 ql_dbg(ql_dbg_init, vha, 0x00e9,
400 "CPU affinity mode enalbed, "
401 "no. of response queues:%d no. of request queues:%d.\n",
402 ha->max_rsp_queues, ha->max_req_queues);
383 } 403 }
384 return 0; 404 return 0;
385fail2: 405fail2:
@@ -526,8 +546,11 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
526 struct qla_hw_data *ha = vha->hw; 546 struct qla_hw_data *ha = vha->hw;
527 547
528 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 548 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
529 if (!sp) 549 if (!sp) {
550 ql_log(ql_log_warn, vha, 0x3006,
551 "Memory allocation failed for sp.\n");
530 return sp; 552 return sp;
553 }
531 554
532 atomic_set(&sp->ref_count, 1); 555 atomic_set(&sp->ref_count, 1);
533 sp->fcport = fcport; 556 sp->fcport = fcport;
@@ -551,30 +574,43 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
551 int rval; 574 int rval;
552 575
553 if (ha->flags.eeh_busy) { 576 if (ha->flags.eeh_busy) {
554 if (ha->flags.pci_channel_io_perm_failure) 577 if (ha->flags.pci_channel_io_perm_failure) {
578 ql_dbg(ql_dbg_io, vha, 0x3001,
579 "PCI Channel IO permanent failure, exiting "
580 "cmd=%p.\n", cmd);
555 cmd->result = DID_NO_CONNECT << 16; 581 cmd->result = DID_NO_CONNECT << 16;
556 else 582 } else {
583 ql_dbg(ql_dbg_io, vha, 0x3002,
584 "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
557 cmd->result = DID_REQUEUE << 16; 585 cmd->result = DID_REQUEUE << 16;
586 }
558 goto qc24_fail_command; 587 goto qc24_fail_command;
559 } 588 }
560 589
561 rval = fc_remote_port_chkready(rport); 590 rval = fc_remote_port_chkready(rport);
562 if (rval) { 591 if (rval) {
563 cmd->result = rval; 592 cmd->result = rval;
593 ql_dbg(ql_dbg_io, vha, 0x3003,
594 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
595 cmd, rval);
564 goto qc24_fail_command; 596 goto qc24_fail_command;
565 } 597 }
566 598
567 if (!vha->flags.difdix_supported && 599 if (!vha->flags.difdix_supported &&
568 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 600 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
569 DEBUG2(qla_printk(KERN_ERR, ha, 601 ql_dbg(ql_dbg_io, vha, 0x3004,
570 "DIF Cap Not Reg, fail DIF capable cmd's:%x\n", 602 "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
571 cmd->cmnd[0])); 603 cmd);
572 cmd->result = DID_NO_CONNECT << 16; 604 cmd->result = DID_NO_CONNECT << 16;
573 goto qc24_fail_command; 605 goto qc24_fail_command;
574 } 606 }
575 if (atomic_read(&fcport->state) != FCS_ONLINE) { 607 if (atomic_read(&fcport->state) != FCS_ONLINE) {
576 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 608 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
577 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 609 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
610 ql_dbg(ql_dbg_io, vha, 0x3005,
611 "Returning DNC, fcport_state=%d loop_state=%d.\n",
612 atomic_read(&fcport->state),
613 atomic_read(&base_vha->loop_state));
578 cmd->result = DID_NO_CONNECT << 16; 614 cmd->result = DID_NO_CONNECT << 16;
579 goto qc24_fail_command; 615 goto qc24_fail_command;
580 } 616 }
@@ -586,8 +622,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
586 goto qc24_host_busy; 622 goto qc24_host_busy;
587 623
588 rval = ha->isp_ops->start_scsi(sp); 624 rval = ha->isp_ops->start_scsi(sp);
589 if (rval != QLA_SUCCESS) 625 if (rval != QLA_SUCCESS) {
626 ql_dbg(ql_dbg_io, vha, 0x3013,
627 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
590 goto qc24_host_busy_free_sp; 628 goto qc24_host_busy_free_sp;
629 }
591 630
592 return 0; 631 return 0;
593 632
@@ -630,7 +669,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
630 int ret = QLA_SUCCESS; 669 int ret = QLA_SUCCESS;
631 670
632 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { 671 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
633 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n")); 672 ql_dbg(ql_dbg_taskm, vha, 0x8005,
673 "Return:eh_wait.\n");
634 return ret; 674 return ret;
635 } 675 }
636 676
@@ -723,7 +763,8 @@ qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
723 else 763 else
724 return_status = QLA_FUNCTION_FAILED; 764 return_status = QLA_FUNCTION_FAILED;
725 765
726 DEBUG2(printk("%s return_status=%d\n", __func__, return_status)); 766 ql_dbg(ql_dbg_taskm, vha, 0x8019,
767 "%s return status=%d.\n", __func__, return_status);
727 768
728 return return_status; 769 return return_status;
729} 770}
@@ -831,10 +872,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
831 int wait = 0; 872 int wait = 0;
832 struct qla_hw_data *ha = vha->hw; 873 struct qla_hw_data *ha = vha->hw;
833 874
875 ql_dbg(ql_dbg_taskm, vha, 0x8000,
876 "Entered %s for cmd=%p.\n", __func__, cmd);
834 if (!CMD_SP(cmd)) 877 if (!CMD_SP(cmd))
835 return SUCCESS; 878 return SUCCESS;
836 879
837 ret = fc_block_scsi_eh(cmd); 880 ret = fc_block_scsi_eh(cmd);
881 ql_dbg(ql_dbg_taskm, vha, 0x8001,
882 "Return value of fc_block_scsi_eh=%d.\n", ret);
838 if (ret != 0) 883 if (ret != 0)
839 return ret; 884 return ret;
840 ret = SUCCESS; 885 ret = SUCCESS;
@@ -849,20 +894,19 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
849 return SUCCESS; 894 return SUCCESS;
850 } 895 }
851 896
852 DEBUG2(printk("%s(%ld): aborting sp %p from RISC.", 897 ql_dbg(ql_dbg_taskm, vha, 0x8002,
853 __func__, vha->host_no, sp)); 898 "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
854 899
855 /* Get a reference to the sp and drop the lock.*/ 900 /* Get a reference to the sp and drop the lock.*/
856 sp_get(sp); 901 sp_get(sp);
857 902
858 spin_unlock_irqrestore(&ha->hardware_lock, flags); 903 spin_unlock_irqrestore(&ha->hardware_lock, flags);
859 if (ha->isp_ops->abort_command(sp)) { 904 if (ha->isp_ops->abort_command(sp)) {
860 DEBUG2(printk("%s(%ld): abort_command " 905 ql_dbg(ql_dbg_taskm, vha, 0x8003,
861 "mbx failed.\n", __func__, vha->host_no)); 906 "Abort command mbx failed for cmd=%p.\n", cmd);
862 ret = FAILED;
863 } else { 907 } else {
864 DEBUG3(printk("%s(%ld): abort_command " 908 ql_dbg(ql_dbg_taskm, vha, 0x8004,
865 "mbx success.\n", __func__, vha->host_no)); 909 "Abort command mbx success.\n");
866 wait = 1; 910 wait = 1;
867 } 911 }
868 qla2x00_sp_compl(ha, sp); 912 qla2x00_sp_compl(ha, sp);
@@ -870,16 +914,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
870 /* Wait for the command to be returned. */ 914 /* Wait for the command to be returned. */
871 if (wait) { 915 if (wait) {
872 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { 916 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
873 qla_printk(KERN_ERR, ha, 917 ql_log(ql_log_warn, vha, 0x8006,
874 "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n", 918 "Abort handler timed out for cmd=%p.\n", cmd);
875 vha->host_no, id, lun, ret);
876 ret = FAILED; 919 ret = FAILED;
877 } 920 }
878 } 921 }
879 922
880 qla_printk(KERN_INFO, ha, 923 ql_log(ql_log_info, vha, 0x801c,
881 "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n", 924 "Abort command issued -- %d %x.\n", wait, ret);
882 vha->host_no, id, lun, wait, ret);
883 925
884 return ret; 926 return ret;
885} 927}
@@ -947,40 +989,59 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
947 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 989 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
948 int err; 990 int err;
949 991
950 if (!fcport) 992 if (!fcport) {
993 ql_log(ql_log_warn, vha, 0x8007,
994 "fcport is NULL.\n");
951 return FAILED; 995 return FAILED;
996 }
952 997
953 err = fc_block_scsi_eh(cmd); 998 err = fc_block_scsi_eh(cmd);
999 ql_dbg(ql_dbg_taskm, vha, 0x8008,
1000 "fc_block_scsi_eh ret=%d.\n", err);
954 if (err != 0) 1001 if (err != 0)
955 return err; 1002 return err;
956 1003
957 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 1004 ql_log(ql_log_info, vha, 0x8009,
958 vha->host_no, cmd->device->id, cmd->device->lun, name); 1005 "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
1006 cmd->device->id, cmd->device->lun, cmd);
959 1007
960 err = 0; 1008 err = 0;
961 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1009 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1010 ql_log(ql_log_warn, vha, 0x800a,
1011 "Wait for hba online failed for cmd=%p.\n", cmd);
962 goto eh_reset_failed; 1012 goto eh_reset_failed;
1013 }
963 err = 1; 1014 err = 1;
964 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 1015 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
1016 ql_log(ql_log_warn, vha, 0x800b,
1017 "Wait for loop ready failed for cmd=%p.\n", cmd);
965 goto eh_reset_failed; 1018 goto eh_reset_failed;
1019 }
966 err = 2; 1020 err = 2;
967 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) 1021 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
968 != QLA_SUCCESS) 1022 != QLA_SUCCESS) {
1023 ql_log(ql_log_warn, vha, 0x800c,
1024 "do_reset failed for cmd=%p.\n", cmd);
969 goto eh_reset_failed; 1025 goto eh_reset_failed;
1026 }
970 err = 3; 1027 err = 3;
971 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 1028 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
972 cmd->device->lun, type) != QLA_SUCCESS) 1029 cmd->device->lun, type) != QLA_SUCCESS) {
1030 ql_log(ql_log_warn, vha, 0x800d,
1031 "wait for peding cmds failed for cmd=%p.\n", cmd);
973 goto eh_reset_failed; 1032 goto eh_reset_failed;
1033 }
974 1034
975 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 1035 ql_log(ql_log_info, vha, 0x800e,
976 vha->host_no, cmd->device->id, cmd->device->lun, name); 1036 "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
1037 cmd->device->id, cmd->device->lun, cmd);
977 1038
978 return SUCCESS; 1039 return SUCCESS;
979 1040
980eh_reset_failed: 1041eh_reset_failed:
981 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n" 1042 ql_log(ql_log_info, vha, 0x800f,
982 , vha->host_no, cmd->device->id, cmd->device->lun, name, 1043 "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
983 reset_errors[err]); 1044 reset_errors[err], cmd->device->id, cmd->device->lun);
984 return FAILED; 1045 return FAILED;
985} 1046}
986 1047
@@ -1030,19 +1091,25 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1030 id = cmd->device->id; 1091 id = cmd->device->id;
1031 lun = cmd->device->lun; 1092 lun = cmd->device->lun;
1032 1093
1033 if (!fcport) 1094 if (!fcport) {
1095 ql_log(ql_log_warn, vha, 0x8010,
1096 "fcport is NULL.\n");
1034 return ret; 1097 return ret;
1098 }
1035 1099
1036 ret = fc_block_scsi_eh(cmd); 1100 ret = fc_block_scsi_eh(cmd);
1101 ql_dbg(ql_dbg_taskm, vha, 0x8011,
1102 "fc_block_scsi_eh ret=%d.\n", ret);
1037 if (ret != 0) 1103 if (ret != 0)
1038 return ret; 1104 return ret;
1039 ret = FAILED; 1105 ret = FAILED;
1040 1106
1041 qla_printk(KERN_INFO, vha->hw, 1107 ql_log(ql_log_info, vha, 0x8012,
1042 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun); 1108 "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
1043 1109
1044 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1110 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1045 DEBUG2(printk("%s failed:board disabled\n",__func__)); 1111 ql_log(ql_log_fatal, vha, 0x8013,
1112 "Wait for hba online failed board disabled.\n");
1046 goto eh_bus_reset_done; 1113 goto eh_bus_reset_done;
1047 } 1114 }
1048 1115
@@ -1055,12 +1122,15 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1055 1122
1056 /* Flush outstanding commands. */ 1123 /* Flush outstanding commands. */
1057 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != 1124 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1058 QLA_SUCCESS) 1125 QLA_SUCCESS) {
1126 ql_log(ql_log_warn, vha, 0x8014,
1127 "Wait for pending commands failed.\n");
1059 ret = FAILED; 1128 ret = FAILED;
1129 }
1060 1130
1061eh_bus_reset_done: 1131eh_bus_reset_done:
1062 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__, 1132 ql_log(ql_log_warn, vha, 0x802b,
1063 (ret == FAILED) ? "failed" : "succeeded"); 1133 "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
1064 1134
1065 return ret; 1135 return ret;
1066} 1136}
@@ -1093,16 +1163,21 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1093 id = cmd->device->id; 1163 id = cmd->device->id;
1094 lun = cmd->device->lun; 1164 lun = cmd->device->lun;
1095 1165
1096 if (!fcport) 1166 if (!fcport) {
1167 ql_log(ql_log_warn, vha, 0x8016,
1168 "fcport is NULL.\n");
1097 return ret; 1169 return ret;
1170 }
1098 1171
1099 ret = fc_block_scsi_eh(cmd); 1172 ret = fc_block_scsi_eh(cmd);
1173 ql_dbg(ql_dbg_taskm, vha, 0x8017,
1174 "fc_block_scsi_eh ret=%d.\n", ret);
1100 if (ret != 0) 1175 if (ret != 0)
1101 return ret; 1176 return ret;
1102 ret = FAILED; 1177 ret = FAILED;
1103 1178
1104 qla_printk(KERN_INFO, ha, 1179 ql_log(ql_log_info, vha, 0x8018,
1105 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun); 1180 "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
1106 1181
1107 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) 1182 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
1108 goto eh_host_reset_lock; 1183 goto eh_host_reset_lock;
@@ -1137,8 +1212,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1137 /* failed. schedule dpc to try */ 1212 /* failed. schedule dpc to try */
1138 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 1213 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1139 1214
1140 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1215 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1216 ql_log(ql_log_warn, vha, 0x802a,
1217 "wait for hba online failed.\n");
1141 goto eh_host_reset_lock; 1218 goto eh_host_reset_lock;
1219 }
1142 } 1220 }
1143 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1221 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1144 } 1222 }
@@ -1149,7 +1227,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1149 ret = SUCCESS; 1227 ret = SUCCESS;
1150 1228
1151eh_host_reset_lock: 1229eh_host_reset_lock:
1152 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1230 qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
1153 (ret == FAILED) ? "failed" : "succeeded"); 1231 (ret == FAILED) ? "failed" : "succeeded");
1154 1232
1155 return ret; 1233 return ret;
@@ -1179,9 +1257,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1179 1257
1180 ret = ha->isp_ops->target_reset(fcport, 0, 0); 1258 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1181 if (ret != QLA_SUCCESS) { 1259 if (ret != QLA_SUCCESS) {
1182 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1260 ql_dbg(ql_dbg_taskm, vha, 0x802c,
1183 "target_reset=%d d_id=%x.\n", __func__, 1261 "Bus Reset failed: Target Reset=%d "
1184 vha->host_no, ret, fcport->d_id.b24)); 1262 "d_id=%x.\n", ret, fcport->d_id.b24);
1185 } 1263 }
1186 } 1264 }
1187 } 1265 }
@@ -1189,9 +1267,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1189 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) { 1267 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
1190 ret = qla2x00_full_login_lip(vha); 1268 ret = qla2x00_full_login_lip(vha);
1191 if (ret != QLA_SUCCESS) { 1269 if (ret != QLA_SUCCESS) {
1192 DEBUG2_3(printk("%s(%ld): failed: " 1270 ql_dbg(ql_dbg_taskm, vha, 0x802d,
1193 "full_login_lip=%d.\n", __func__, vha->host_no, 1271 "full_login_lip=%d.\n", ret);
1194 ret));
1195 } 1272 }
1196 atomic_set(&vha->loop_state, LOOP_DOWN); 1273 atomic_set(&vha->loop_state, LOOP_DOWN);
1197 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1274 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1202,8 +1279,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1202 if (ha->flags.enable_lip_reset) { 1279 if (ha->flags.enable_lip_reset) {
1203 ret = qla2x00_lip_reset(vha); 1280 ret = qla2x00_lip_reset(vha);
1204 if (ret != QLA_SUCCESS) { 1281 if (ret != QLA_SUCCESS) {
1205 DEBUG2_3(printk("%s(%ld): failed: " 1282 ql_dbg(ql_dbg_taskm, vha, 0x802e,
1206 "lip_reset=%d.\n", __func__, vha->host_no, ret)); 1283 "lip_reset failed (%d).\n", ret);
1207 } else 1284 } else
1208 qla2x00_wait_for_loop_ready(vha); 1285 qla2x00_wait_for_loop_ready(vha);
1209 } 1286 }
@@ -1302,17 +1379,17 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1302 if (!scsi_track_queue_full(sdev, qdepth)) 1379 if (!scsi_track_queue_full(sdev, qdepth))
1303 return; 1380 return;
1304 1381
1305 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw, 1382 ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
1306 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", 1383 "Queue depth adjusted-down "
1307 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1384 "to %d for scsi(%ld:%d:%d:%d).\n",
1308 sdev->queue_depth)); 1385 sdev->queue_depth, fcport->vha->host_no,
1386 sdev->channel, sdev->id, sdev->lun);
1309} 1387}
1310 1388
1311static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) 1389static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1312{ 1390{
1313 fc_port_t *fcport = sdev->hostdata; 1391 fc_port_t *fcport = sdev->hostdata;
1314 struct scsi_qla_host *vha = fcport->vha; 1392 struct scsi_qla_host *vha = fcport->vha;
1315 struct qla_hw_data *ha = vha->hw;
1316 struct req_que *req = NULL; 1393 struct req_que *req = NULL;
1317 1394
1318 req = vha->req; 1395 req = vha->req;
@@ -1327,10 +1404,11 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1327 else 1404 else
1328 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); 1405 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1329 1406
1330 DEBUG2(qla_printk(KERN_INFO, ha, 1407 ql_dbg(ql_dbg_io, vha, 0x302a,
1331 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 1408 "Queue depth adjusted-up to %d for "
1332 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1409 "scsi(%ld:%d:%d:%d).\n",
1333 sdev->queue_depth)); 1410 sdev->queue_depth, fcport->vha->host_no,
1411 sdev->channel, sdev->id, sdev->lun);
1334} 1412}
1335 1413
1336static int 1414static int
@@ -1776,6 +1854,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1776 ha->flags.port0 = 1; 1854 ha->flags.port0 = 1;
1777 else 1855 else
1778 ha->flags.port0 = 0; 1856 ha->flags.port0 = 0;
1857 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
1858 "device_type=0x%x port=%d fw_srisc_address=%p.\n",
1859 ha->device_type, ha->flags.port0, ha->fw_srisc_address);
1779} 1860}
1780 1861
1781static int 1862static int
@@ -1790,10 +1871,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1790 1871
1791 if (pci_request_selected_regions(ha->pdev, ha->bars, 1872 if (pci_request_selected_regions(ha->pdev, ha->bars,
1792 QLA2XXX_DRIVER_NAME)) { 1873 QLA2XXX_DRIVER_NAME)) {
1793 qla_printk(KERN_WARNING, ha, 1874 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
1794 "Failed to reserve PIO/MMIO regions (%s)\n", 1875 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
1795 pci_name(ha->pdev)); 1876 pci_name(ha->pdev));
1796
1797 goto iospace_error_exit; 1877 goto iospace_error_exit;
1798 } 1878 }
1799 if (!(ha->bars & 1)) 1879 if (!(ha->bars & 1))
@@ -1803,39 +1883,42 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1803 pio = pci_resource_start(ha->pdev, 0); 1883 pio = pci_resource_start(ha->pdev, 0);
1804 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1884 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1805 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1885 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1806 qla_printk(KERN_WARNING, ha, 1886 ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
1807 "Invalid PCI I/O region size (%s)...\n", 1887 "Invalid pci I/O region size (%s).\n",
1808 pci_name(ha->pdev)); 1888 pci_name(ha->pdev));
1809 pio = 0; 1889 pio = 0;
1810 } 1890 }
1811 } else { 1891 } else {
1812 qla_printk(KERN_WARNING, ha, 1892 ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
1813 "region #0 not a PIO resource (%s)...\n", 1893 "Region #0 no a PIO resource (%s).\n",
1814 pci_name(ha->pdev)); 1894 pci_name(ha->pdev));
1815 pio = 0; 1895 pio = 0;
1816 } 1896 }
1817 ha->pio_address = pio; 1897 ha->pio_address = pio;
1898 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
1899 "PIO address=%p.\n",
1900 ha->pio_address);
1818 1901
1819skip_pio: 1902skip_pio:
1820 /* Use MMIO operations for all accesses. */ 1903 /* Use MMIO operations for all accesses. */
1821 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1904 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1822 qla_printk(KERN_ERR, ha, 1905 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
1823 "region #1 not an MMIO resource (%s), aborting\n", 1906 "Region #1 not an MMIO resource (%s), aborting.\n",
1824 pci_name(ha->pdev)); 1907 pci_name(ha->pdev));
1825 goto iospace_error_exit; 1908 goto iospace_error_exit;
1826 } 1909 }
1827 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1910 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1828 qla_printk(KERN_ERR, ha, 1911 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
1829 "Invalid PCI mem region size (%s), aborting\n", 1912 "Invalid PCI mem region size (%s), aborting.\n",
1830 pci_name(ha->pdev)); 1913 pci_name(ha->pdev));
1831 goto iospace_error_exit; 1914 goto iospace_error_exit;
1832 } 1915 }
1833 1916
1834 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1917 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1835 if (!ha->iobase) { 1918 if (!ha->iobase) {
1836 qla_printk(KERN_ERR, ha, 1919 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
1837 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); 1920 "Cannot remap MMIO (%s), aborting.\n",
1838 1921 pci_name(ha->pdev));
1839 goto iospace_error_exit; 1922 goto iospace_error_exit;
1840 } 1923 }
1841 1924
@@ -1849,6 +1932,8 @@ skip_pio:
1849 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1932 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1850 pci_resource_len(ha->pdev, 3)); 1933 pci_resource_len(ha->pdev, 3));
1851 if (ha->mqiobase) { 1934 if (ha->mqiobase) {
1935 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
1936 "MQIO Base=%p.\n", ha->mqiobase);
1852 /* Read MSIX vector size of the board */ 1937 /* Read MSIX vector size of the board */
1853 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 1938 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1854 ha->msix_count = msix; 1939 ha->msix_count = msix;
@@ -1861,17 +1946,24 @@ skip_pio:
1861 ha->max_req_queues = 2; 1946 ha->max_req_queues = 2;
1862 } else if (ql2xmaxqueues > 1) { 1947 } else if (ql2xmaxqueues > 1) {
1863 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 1948 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1864 QLA_MQ_SIZE : ql2xmaxqueues; 1949 QLA_MQ_SIZE : ql2xmaxqueues;
1865 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no" 1950 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
1866 " of request queues:%d\n", ha->max_req_queues)); 1951 "QoS mode set, max no of request queues:%d.\n",
1952 ha->max_req_queues);
1953 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
1954 "QoS mode set, max no of request queues:%d.\n",
1955 ha->max_req_queues);
1867 } 1956 }
1868 qla_printk(KERN_INFO, ha, 1957 ql_log_pci(ql_log_info, ha->pdev, 0x001a,
1869 "MSI-X vector count: %d\n", msix); 1958 "MSI-X vector count: %d.\n", msix);
1870 } else 1959 } else
1871 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n"); 1960 ql_log_pci(ql_log_info, ha->pdev, 0x001b,
1961 "BAR 3 not enabled.\n");
1872 1962
1873mqiobase_exit: 1963mqiobase_exit:
1874 ha->msix_count = ha->max_rsp_queues + 1; 1964 ha->msix_count = ha->max_rsp_queues + 1;
1965 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
1966 "MSIX Count:%d.\n", ha->msix_count);
1875 return (0); 1967 return (0);
1876 1968
1877iospace_error_exit: 1969iospace_error_exit:
@@ -1935,7 +2027,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1935 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) { 2027 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
1936 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2028 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1937 mem_only = 1; 2029 mem_only = 1;
2030 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
2031 "Mem only adapter.\n");
1938 } 2032 }
2033 ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
2034 "Bars=%d.\n", bars);
1939 2035
1940 if (mem_only) { 2036 if (mem_only) {
1941 if (pci_enable_device_mem(pdev)) 2037 if (pci_enable_device_mem(pdev))
@@ -1950,9 +2046,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1950 2046
1951 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2047 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1952 if (!ha) { 2048 if (!ha) {
1953 DEBUG(printk("Unable to allocate memory for ha\n")); 2049 ql_log_pci(ql_log_fatal, pdev, 0x0009,
2050 "Unable to allocate memory for ha.\n");
1954 goto probe_out; 2051 goto probe_out;
1955 } 2052 }
2053 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2054 "Memory allocated for ha=%p.\n", ha);
1956 ha->pdev = pdev; 2055 ha->pdev = pdev;
1957 2056
1958 /* Clear our data area */ 2057 /* Clear our data area */
@@ -1974,10 +2073,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1974 if (ret) 2073 if (ret)
1975 goto probe_hw_failed; 2074 goto probe_hw_failed;
1976 2075
1977 qla_printk(KERN_INFO, ha, 2076 ql_log_pci(ql_log_info, pdev, 0x001d,
1978 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 2077 "Found an ISP%04X irq %d iobase 0x%p.\n",
1979 ha->iobase); 2078 pdev->device, pdev->irq, ha->iobase);
1980
1981 ha->prev_topology = 0; 2079 ha->prev_topology = 0;
1982 ha->init_cb_size = sizeof(init_cb_t); 2080 ha->init_cb_size = sizeof(init_cb_t);
1983 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2081 ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2078,7 +2176,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2078 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2176 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2079 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2177 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2080 } 2178 }
2081 2179 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
2180 "mbx_count=%d, req_length=%d, "
2181 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
2182 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
2183 ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
2184 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
2185 ha->nvram_npiv_size);
2186 ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
2187 "isp_ops=%p, flash_conf_off=%d, "
2188 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
2189 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
2190 ha->nvram_conf_off, ha->nvram_data_off);
2082 mutex_init(&ha->vport_lock); 2191 mutex_init(&ha->vport_lock);
2083 init_completion(&ha->mbx_cmd_comp); 2192 init_completion(&ha->mbx_cmd_comp);
2084 complete(&ha->mbx_cmd_comp); 2193 complete(&ha->mbx_cmd_comp);
@@ -2088,10 +2197,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2088 set_bit(0, (unsigned long *) ha->vp_idx_map); 2197 set_bit(0, (unsigned long *) ha->vp_idx_map);
2089 2198
2090 qla2x00_config_dma_addressing(ha); 2199 qla2x00_config_dma_addressing(ha);
2200 ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
2201 "64 Bit addressing is %s.\n",
2202 ha->flags.enable_64bit_addressing ? "enable" :
2203 "disable");
2091 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 2204 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2092 if (!ret) { 2205 if (!ret) {
2093 qla_printk(KERN_WARNING, ha, 2206 ql_log_pci(ql_log_fatal, pdev, 0x0031,
2094 "[ERROR] Failed to allocate memory for adapter\n"); 2207 "Failed to allocate memory for adapter, aborting.\n");
2095 2208
2096 goto probe_hw_failed; 2209 goto probe_hw_failed;
2097 } 2210 }
@@ -2103,9 +2216,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2103 2216
2104 base_vha = qla2x00_create_host(sht, ha); 2217 base_vha = qla2x00_create_host(sht, ha);
2105 if (!base_vha) { 2218 if (!base_vha) {
2106 qla_printk(KERN_WARNING, ha,
2107 "[ERROR] Failed to allocate memory for scsi_host\n");
2108
2109 ret = -ENOMEM; 2219 ret = -ENOMEM;
2110 qla2x00_mem_free(ha); 2220 qla2x00_mem_free(ha);
2111 qla2x00_free_req_que(ha, req); 2221 qla2x00_free_req_que(ha, req);
@@ -2132,7 +2242,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2132 if (!IS_QLA82XX(ha)) 2242 if (!IS_QLA82XX(ha))
2133 host->sg_tablesize = QLA_SG_ALL; 2243 host->sg_tablesize = QLA_SG_ALL;
2134 } 2244 }
2135 2245 ql_dbg(ql_dbg_init, base_vha, 0x0032,
2246 "can_queue=%d, req=%p, "
2247 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2248 host->can_queue, base_vha->req,
2249 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2136 host->max_id = max_id; 2250 host->max_id = max_id;
2137 host->this_id = 255; 2251 host->this_id = 255;
2138 host->cmd_per_lun = 3; 2252 host->cmd_per_lun = 3;
@@ -2146,6 +2260,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2146 host->transportt = qla2xxx_transport_template; 2260 host->transportt = qla2xxx_transport_template;
2147 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 2261 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2148 2262
2263 ql_dbg(ql_dbg_init, base_vha, 0x0033,
2264 "max_id=%d this_id=%d "
2265 "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
2266 "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
2267 host->this_id, host->cmd_per_lun, host->unique_id,
2268 host->max_cmd_len, host->max_channel, host->max_lun,
2269 host->transportt, sht->vendor_id);
2270
2149 /* Set up the irqs */ 2271 /* Set up the irqs */
2150 ret = qla2x00_request_irqs(ha, rsp); 2272 ret = qla2x00_request_irqs(ha, rsp);
2151 if (ret) 2273 if (ret)
@@ -2156,9 +2278,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2156 /* Alloc arrays of request and response ring ptrs */ 2278 /* Alloc arrays of request and response ring ptrs */
2157que_init: 2279que_init:
2158 if (!qla2x00_alloc_queues(ha)) { 2280 if (!qla2x00_alloc_queues(ha)) {
2159 qla_printk(KERN_WARNING, ha, 2281 ql_log(ql_log_fatal, base_vha, 0x003d,
2160 "[ERROR] Failed to allocate memory for queue" 2282 "Failed to allocate memory for queue pointers.. aborting.\n");
2161 " pointers\n");
2162 goto probe_init_failed; 2283 goto probe_init_failed;
2163 } 2284 }
2164 2285
@@ -2186,20 +2307,33 @@ que_init:
2186 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 2307 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2187 } 2308 }
2188 2309
2189 if (qla2x00_initialize_adapter(base_vha)) { 2310 ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
2190 qla_printk(KERN_WARNING, ha, 2311 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2191 "Failed to initialize adapter\n"); 2312 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2313 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
2314 "req->req_q_in=%p req->req_q_out=%p "
2315 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2316 req->req_q_in, req->req_q_out,
2317 rsp->rsp_q_in, rsp->rsp_q_out);
2318 ql_dbg(ql_dbg_init, base_vha, 0x003e,
2319 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2320 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2321 ql_dbg(ql_dbg_init, base_vha, 0x003f,
2322 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2323 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
2192 2324
2193 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 2325 if (qla2x00_initialize_adapter(base_vha)) {
2194 "Adapter flags %x.\n", 2326 ql_log(ql_log_fatal, base_vha, 0x00d6,
2195 base_vha->host_no, base_vha->device_flags)); 2327 "Failed to initialize adapter - Adapter flags %x.\n",
2328 base_vha->device_flags);
2196 2329
2197 if (IS_QLA82XX(ha)) { 2330 if (IS_QLA82XX(ha)) {
2198 qla82xx_idc_lock(ha); 2331 qla82xx_idc_lock(ha);
2199 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2332 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2200 QLA82XX_DEV_FAILED); 2333 QLA82XX_DEV_FAILED);
2201 qla82xx_idc_unlock(ha); 2334 qla82xx_idc_unlock(ha);
2202 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 2335 ql_log(ql_log_fatal, base_vha, 0x00d7,
2336 "HW State: FAILED.\n");
2203 } 2337 }
2204 2338
2205 ret = -ENODEV; 2339 ret = -ENODEV;
@@ -2208,9 +2342,8 @@ que_init:
2208 2342
2209 if (ha->mqenable) { 2343 if (ha->mqenable) {
2210 if (qla25xx_setup_mode(base_vha)) { 2344 if (qla25xx_setup_mode(base_vha)) {
2211 qla_printk(KERN_WARNING, ha, 2345 ql_log(ql_log_warn, base_vha, 0x00ec,
2212 "Can't create queues, falling back to single" 2346 "Failed to create queues, falling back to single queue mode.\n");
2213 " queue mode\n");
2214 goto que_init; 2347 goto que_init;
2215 } 2348 }
2216 } 2349 }
@@ -2222,13 +2355,15 @@ que_init:
2222 * Startup the kernel thread for this host adapter 2355 * Startup the kernel thread for this host adapter
2223 */ 2356 */
2224 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 2357 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2225 "%s_dpc", base_vha->host_str); 2358 "%s_dpc", base_vha->host_str);
2226 if (IS_ERR(ha->dpc_thread)) { 2359 if (IS_ERR(ha->dpc_thread)) {
2227 qla_printk(KERN_WARNING, ha, 2360 ql_log(ql_log_fatal, base_vha, 0x00ed,
2228 "Unable to start DPC thread!\n"); 2361 "Failed to start DPC thread.\n");
2229 ret = PTR_ERR(ha->dpc_thread); 2362 ret = PTR_ERR(ha->dpc_thread);
2230 goto probe_failed; 2363 goto probe_failed;
2231 } 2364 }
2365 ql_dbg(ql_dbg_init, base_vha, 0x00ee,
2366 "DPC thread started successfully.\n");
2232 2367
2233skip_dpc: 2368skip_dpc:
2234 list_add_tail(&base_vha->list, &ha->vp_list); 2369 list_add_tail(&base_vha->list, &ha->vp_list);
@@ -2236,16 +2371,18 @@ skip_dpc:
2236 2371
2237 /* Initialized the timer */ 2372 /* Initialized the timer */
2238 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL); 2373 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
2239 2374 ql_dbg(ql_dbg_init, base_vha, 0x00ef,
2240 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2375 "Started qla2x00_timer with "
2241 base_vha->host_no, ha)); 2376 "interval=%d.\n", WATCH_INTERVAL);
2377 ql_dbg(ql_dbg_init, base_vha, 0x00f0,
2378 "Detected hba at address=%p.\n",
2379 ha);
2242 2380
2243 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 2381 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
2244 if (ha->fw_attributes & BIT_4) { 2382 if (ha->fw_attributes & BIT_4) {
2245 base_vha->flags.difdix_supported = 1; 2383 base_vha->flags.difdix_supported = 1;
2246 DEBUG18(qla_printk(KERN_INFO, ha, 2384 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2247 "Registering for DIF/DIX type 1 and 3" 2385 "Registering for DIF/DIX type 1 and 3 protection.\n");
2248 " protection.\n"));
2249 scsi_host_set_prot(host, 2386 scsi_host_set_prot(host,
2250 SHOST_DIF_TYPE1_PROTECTION 2387 SHOST_DIF_TYPE1_PROTECTION
2251 | SHOST_DIF_TYPE2_PROTECTION 2388 | SHOST_DIF_TYPE2_PROTECTION
@@ -2267,6 +2404,9 @@ skip_dpc:
2267 base_vha->flags.init_done = 1; 2404 base_vha->flags.init_done = 1;
2268 base_vha->flags.online = 1; 2405 base_vha->flags.online = 1;
2269 2406
2407 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
2408 "Init done and hba is online.\n");
2409
2270 scsi_scan_host(host); 2410 scsi_scan_host(host);
2271 2411
2272 qla2x00_alloc_sysfs_attr(base_vha); 2412 qla2x00_alloc_sysfs_attr(base_vha);
@@ -2275,14 +2415,17 @@ skip_dpc:
2275 2415
2276 qla2x00_dfs_setup(base_vha); 2416 qla2x00_dfs_setup(base_vha);
2277 2417
2278 qla_printk(KERN_INFO, ha, "\n" 2418 ql_log(ql_log_info, base_vha, 0x00fa,
2279 " QLogic Fibre Channel HBA Driver: %s\n" 2419 "QLogic Fibre Channed HBA Driver: %s.\n",
2280 " QLogic %s - %s\n" 2420 qla2x00_version_str);
2281 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 2421 ql_log(ql_log_info, base_vha, 0x00fb,
2282 qla2x00_version_str, ha->model_number, 2422 "QLogic %s - %s.\n",
2283 ha->model_desc ? ha->model_desc : "", pdev->device, 2423 ha->model_number, ha->model_desc ? ha->model_desc : "");
2284 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev), 2424 ql_log(ql_log_info, base_vha, 0x00fc,
2285 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no, 2425 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
2426 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
2427 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
2428 base_vha->host_no,
2286 ha->isp_ops->fw_version_str(base_vha, fw_str)); 2429 ha->isp_ops->fw_version_str(base_vha, fw_str));
2287 2430
2288 return 0; 2431 return 0;
@@ -2580,20 +2723,15 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2580 fcport->login_retry = vha->hw->login_retry_count; 2723 fcport->login_retry = vha->hw->login_retry_count;
2581 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2724 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2582 2725
2583 DEBUG(printk("scsi(%ld): Port login retry: " 2726 ql_dbg(ql_dbg_disc, vha, 0x2067,
2727 "Port login retry "
2584 "%02x%02x%02x%02x%02x%02x%02x%02x, " 2728 "%02x%02x%02x%02x%02x%02x%02x%02x, "
2585 "id = 0x%04x retry cnt=%d\n", 2729 "id = 0x%04x retry cnt=%d.\n",
2586 vha->host_no, 2730 fcport->port_name[0], fcport->port_name[1],
2587 fcport->port_name[0], 2731 fcport->port_name[2], fcport->port_name[3],
2588 fcport->port_name[1], 2732 fcport->port_name[4], fcport->port_name[5],
2589 fcport->port_name[2], 2733 fcport->port_name[6], fcport->port_name[7],
2590 fcport->port_name[3], 2734 fcport->loop_id, fcport->login_retry);
2591 fcport->port_name[4],
2592 fcport->port_name[5],
2593 fcport->port_name[6],
2594 fcport->port_name[7],
2595 fcport->loop_id,
2596 fcport->login_retry));
2597 } 2735 }
2598} 2736}
2599 2737
@@ -2676,6 +2814,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2676 ctx_cachep); 2814 ctx_cachep);
2677 if (!ha->ctx_mempool) 2815 if (!ha->ctx_mempool)
2678 goto fail_free_srb_mempool; 2816 goto fail_free_srb_mempool;
2817 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
2818 "ctx_cachep=%p ctx_mempool=%p.\n",
2819 ctx_cachep, ha->ctx_mempool);
2679 } 2820 }
2680 2821
2681 /* Get memory for cached NVRAM */ 2822 /* Get memory for cached NVRAM */
@@ -2690,22 +2831,29 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2690 if (!ha->s_dma_pool) 2831 if (!ha->s_dma_pool)
2691 goto fail_free_nvram; 2832 goto fail_free_nvram;
2692 2833
2834 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
2835 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
2836 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
2837
2693 if (IS_QLA82XX(ha) || ql2xenabledif) { 2838 if (IS_QLA82XX(ha) || ql2xenabledif) {
2694 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2839 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2695 DSD_LIST_DMA_POOL_SIZE, 8, 0); 2840 DSD_LIST_DMA_POOL_SIZE, 8, 0);
2696 if (!ha->dl_dma_pool) { 2841 if (!ha->dl_dma_pool) {
2697 qla_printk(KERN_WARNING, ha, 2842 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
2698 "Memory Allocation failed - dl_dma_pool\n"); 2843 "Failed to allocate memory for dl_dma_pool.\n");
2699 goto fail_s_dma_pool; 2844 goto fail_s_dma_pool;
2700 } 2845 }
2701 2846
2702 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2847 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2703 FCP_CMND_DMA_POOL_SIZE, 8, 0); 2848 FCP_CMND_DMA_POOL_SIZE, 8, 0);
2704 if (!ha->fcp_cmnd_dma_pool) { 2849 if (!ha->fcp_cmnd_dma_pool) {
2705 qla_printk(KERN_WARNING, ha, 2850 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
2706 "Memory Allocation failed - fcp_cmnd_dma_pool\n"); 2851 "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
2707 goto fail_dl_dma_pool; 2852 goto fail_dl_dma_pool;
2708 } 2853 }
2854 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
2855 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
2856 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
2709 } 2857 }
2710 2858
2711 /* Allocate memory for SNS commands */ 2859 /* Allocate memory for SNS commands */
@@ -2715,6 +2863,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2715 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2863 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2716 if (!ha->sns_cmd) 2864 if (!ha->sns_cmd)
2717 goto fail_dma_pool; 2865 goto fail_dma_pool;
2866 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
2867 "sns_cmd.\n", ha->sns_cmd);
2718 } else { 2868 } else {
2719 /* Get consistent memory allocated for MS IOCB */ 2869 /* Get consistent memory allocated for MS IOCB */
2720 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2870 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -2726,12 +2876,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2726 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2876 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2727 if (!ha->ct_sns) 2877 if (!ha->ct_sns)
2728 goto fail_free_ms_iocb; 2878 goto fail_free_ms_iocb;
2879 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
2880 "ms_iocb=%p ct_sns=%p.\n",
2881 ha->ms_iocb, ha->ct_sns);
2729 } 2882 }
2730 2883
2731 /* Allocate memory for request ring */ 2884 /* Allocate memory for request ring */
2732 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 2885 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2733 if (!*req) { 2886 if (!*req) {
2734 DEBUG(printk("Unable to allocate memory for req\n")); 2887 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
2888 "Failed to allocate memory for req.\n");
2735 goto fail_req; 2889 goto fail_req;
2736 } 2890 }
2737 (*req)->length = req_len; 2891 (*req)->length = req_len;
@@ -2739,14 +2893,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2739 ((*req)->length + 1) * sizeof(request_t), 2893 ((*req)->length + 1) * sizeof(request_t),
2740 &(*req)->dma, GFP_KERNEL); 2894 &(*req)->dma, GFP_KERNEL);
2741 if (!(*req)->ring) { 2895 if (!(*req)->ring) {
2742 DEBUG(printk("Unable to allocate memory for req_ring\n")); 2896 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
2897 "Failed to allocate memory for req_ring.\n");
2743 goto fail_req_ring; 2898 goto fail_req_ring;
2744 } 2899 }
2745 /* Allocate memory for response ring */ 2900 /* Allocate memory for response ring */
2746 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 2901 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2747 if (!*rsp) { 2902 if (!*rsp) {
2748 qla_printk(KERN_WARNING, ha, 2903 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
2749 "Unable to allocate memory for rsp\n"); 2904 "Failed to allocate memory for rsp.\n");
2750 goto fail_rsp; 2905 goto fail_rsp;
2751 } 2906 }
2752 (*rsp)->hw = ha; 2907 (*rsp)->hw = ha;
@@ -2755,19 +2910,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2755 ((*rsp)->length + 1) * sizeof(response_t), 2910 ((*rsp)->length + 1) * sizeof(response_t),
2756 &(*rsp)->dma, GFP_KERNEL); 2911 &(*rsp)->dma, GFP_KERNEL);
2757 if (!(*rsp)->ring) { 2912 if (!(*rsp)->ring) {
2758 qla_printk(KERN_WARNING, ha, 2913 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
2759 "Unable to allocate memory for rsp_ring\n"); 2914 "Failed to allocate memory for rsp_ring.\n");
2760 goto fail_rsp_ring; 2915 goto fail_rsp_ring;
2761 } 2916 }
2762 (*req)->rsp = *rsp; 2917 (*req)->rsp = *rsp;
2763 (*rsp)->req = *req; 2918 (*rsp)->req = *req;
2919 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
2920 "req=%p req->length=%d req->ring=%p rsp=%p "
2921 "rsp->length=%d rsp->ring=%p.\n",
2922 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
2923 (*rsp)->ring);
2764 /* Allocate memory for NVRAM data for vports */ 2924 /* Allocate memory for NVRAM data for vports */
2765 if (ha->nvram_npiv_size) { 2925 if (ha->nvram_npiv_size) {
2766 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) * 2926 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2767 ha->nvram_npiv_size, GFP_KERNEL); 2927 ha->nvram_npiv_size, GFP_KERNEL);
2768 if (!ha->npiv_info) { 2928 if (!ha->npiv_info) {
2769 qla_printk(KERN_WARNING, ha, 2929 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
2770 "Unable to allocate memory for npiv info\n"); 2930 "Failed to allocate memory for npiv_info.\n");
2771 goto fail_npiv_info; 2931 goto fail_npiv_info;
2772 } 2932 }
2773 } else 2933 } else
@@ -2779,6 +2939,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2779 &ha->ex_init_cb_dma); 2939 &ha->ex_init_cb_dma);
2780 if (!ha->ex_init_cb) 2940 if (!ha->ex_init_cb)
2781 goto fail_ex_init_cb; 2941 goto fail_ex_init_cb;
2942 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
2943 "ex_init_cb=%p.\n", ha->ex_init_cb);
2782 } 2944 }
2783 2945
2784 INIT_LIST_HEAD(&ha->gbl_dsd_list); 2946 INIT_LIST_HEAD(&ha->gbl_dsd_list);
@@ -2789,6 +2951,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2789 &ha->async_pd_dma); 2951 &ha->async_pd_dma);
2790 if (!ha->async_pd) 2952 if (!ha->async_pd)
2791 goto fail_async_pd; 2953 goto fail_async_pd;
2954 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
2955 "async_pd=%p.\n", ha->async_pd);
2792 } 2956 }
2793 2957
2794 INIT_LIST_HEAD(&ha->vp_list); 2958 INIT_LIST_HEAD(&ha->vp_list);
@@ -2854,7 +3018,8 @@ fail_free_init_cb:
2854 ha->init_cb = NULL; 3018 ha->init_cb = NULL;
2855 ha->init_cb_dma = 0; 3019 ha->init_cb_dma = 0;
2856fail: 3020fail:
2857 DEBUG(printk("%s: Memory allocation failure\n", __func__)); 3021 ql_log(ql_log_fatal, NULL, 0x0030,
3022 "Memory allocation failure.\n");
2858 return -ENOMEM; 3023 return -ENOMEM;
2859} 3024}
2860 3025
@@ -3003,8 +3168,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3003 3168
3004 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 3169 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
3005 if (host == NULL) { 3170 if (host == NULL) {
3006 printk(KERN_WARNING 3171 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
3007 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 3172 "Failed to allocate host from the scsi layer, aborting.\n");
3008 goto fail; 3173 goto fail;
3009 } 3174 }
3010 3175
@@ -3023,6 +3188,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3023 spin_lock_init(&vha->work_lock); 3188 spin_lock_init(&vha->work_lock);
3024 3189
3025 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3190 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3191 ql_dbg(ql_dbg_init, vha, 0x0041,
3192 "Allocated the host=%p hw=%p vha=%p dev_name=%s",
3193 vha->host, vha->hw, vha,
3194 dev_name(&(ha->pdev->dev)));
3195
3026 return vha; 3196 return vha;
3027 3197
3028fail: 3198fail:
@@ -3264,18 +3434,18 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3264 if (status == QLA_SUCCESS) { 3434 if (status == QLA_SUCCESS) {
3265 fcport->old_loop_id = fcport->loop_id; 3435 fcport->old_loop_id = fcport->loop_id;
3266 3436
3267 DEBUG(printk("scsi(%ld): port login OK: logged " 3437 ql_dbg(ql_dbg_disc, vha, 0x2003,
3268 "in ID 0x%x\n", vha->host_no, fcport->loop_id)); 3438 "Port login OK: logged in ID 0x%x.\n",
3439 fcport->loop_id);
3269 3440
3270 qla2x00_update_fcport(vha, fcport); 3441 qla2x00_update_fcport(vha, fcport);
3271 3442
3272 } else if (status == 1) { 3443 } else if (status == 1) {
3273 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3444 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3274 /* retry the login again */ 3445 /* retry the login again */
3275 DEBUG(printk("scsi(%ld): Retrying" 3446 ql_dbg(ql_dbg_disc, vha, 0x2007,
3276 " %d login again loop_id 0x%x\n", 3447 "Retrying %d login again loop_id 0x%x.\n",
3277 vha->host_no, fcport->login_retry, 3448 fcport->login_retry, fcport->loop_id);
3278 fcport->loop_id));
3279 } else { 3449 } else {
3280 fcport->login_retry = 0; 3450 fcport->login_retry = 0;
3281 } 3451 }
@@ -3315,26 +3485,27 @@ qla2x00_do_dpc(void *data)
3315 3485
3316 set_current_state(TASK_INTERRUPTIBLE); 3486 set_current_state(TASK_INTERRUPTIBLE);
3317 while (!kthread_should_stop()) { 3487 while (!kthread_should_stop()) {
3318 DEBUG3(printk("qla2x00: DPC handler sleeping\n")); 3488 ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
3489 "DPC handler sleeping.\n");
3319 3490
3320 schedule(); 3491 schedule();
3321 __set_current_state(TASK_RUNNING); 3492 __set_current_state(TASK_RUNNING);
3322 3493
3323 DEBUG3(printk("qla2x00: DPC handler waking up\n")); 3494 ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
3495 "DPC handler waking up.\n");
3496 ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
3497 "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
3324 3498
3325 /* Initialization not yet finished. Don't do anything yet. */ 3499 /* Initialization not yet finished. Don't do anything yet. */
3326 if (!base_vha->flags.init_done) 3500 if (!base_vha->flags.init_done)
3327 continue; 3501 continue;
3328 3502
3329 if (ha->flags.eeh_busy) { 3503 if (ha->flags.eeh_busy) {
3330 DEBUG17(qla_printk(KERN_WARNING, ha, 3504 ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
3331 "qla2x00_do_dpc: dpc_flags: %lx\n", 3505 "eeh_busy=%d.\n", ha->flags.eeh_busy);
3332 base_vha->dpc_flags));
3333 continue; 3506 continue;
3334 } 3507 }
3335 3508
3336 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
3337
3338 ha->dpc_active = 1; 3509 ha->dpc_active = 1;
3339 3510
3340 if (ha->flags.mbox_busy) { 3511 if (ha->flags.mbox_busy) {
@@ -3351,8 +3522,8 @@ qla2x00_do_dpc(void *data)
3351 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3522 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3352 QLA82XX_DEV_FAILED); 3523 QLA82XX_DEV_FAILED);
3353 qla82xx_idc_unlock(ha); 3524 qla82xx_idc_unlock(ha);
3354 qla_printk(KERN_INFO, ha, 3525 ql_log(ql_log_info, base_vha, 0x4004,
3355 "HW State: FAILED\n"); 3526 "HW State: FAILED.\n");
3356 qla82xx_device_state_handler(base_vha); 3527 qla82xx_device_state_handler(base_vha);
3357 continue; 3528 continue;
3358 } 3529 }
@@ -3360,10 +3531,8 @@ qla2x00_do_dpc(void *data)
3360 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 3531 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3361 &base_vha->dpc_flags)) { 3532 &base_vha->dpc_flags)) {
3362 3533
3363 DEBUG(printk(KERN_INFO 3534 ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
3364 "scsi(%ld): dpc: sched " 3535 "FCoE context reset scheduled.\n");
3365 "qla82xx_fcoe_ctx_reset ha = %p\n",
3366 base_vha->host_no, ha));
3367 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3536 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3368 &base_vha->dpc_flags))) { 3537 &base_vha->dpc_flags))) {
3369 if (qla82xx_fcoe_ctx_reset(base_vha)) { 3538 if (qla82xx_fcoe_ctx_reset(base_vha)) {
@@ -3377,18 +3546,16 @@ qla2x00_do_dpc(void *data)
3377 &base_vha->dpc_flags); 3546 &base_vha->dpc_flags);
3378 } 3547 }
3379 3548
3380 DEBUG(printk("scsi(%ld): dpc:" 3549 ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
3381 " qla82xx_fcoe_ctx_reset end\n", 3550 "FCoE context reset end.\n");
3382 base_vha->host_no));
3383 } 3551 }
3384 } 3552 }
3385 3553
3386 if (test_and_clear_bit(ISP_ABORT_NEEDED, 3554 if (test_and_clear_bit(ISP_ABORT_NEEDED,
3387 &base_vha->dpc_flags)) { 3555 &base_vha->dpc_flags)) {
3388 3556
3389 DEBUG(printk("scsi(%ld): dpc: sched " 3557 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
3390 "qla2x00_abort_isp ha = %p\n", 3558 "ISP abort scheduled.\n");
3391 base_vha->host_no, ha));
3392 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3559 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3393 &base_vha->dpc_flags))) { 3560 &base_vha->dpc_flags))) {
3394 3561
@@ -3401,8 +3568,8 @@ qla2x00_do_dpc(void *data)
3401 &base_vha->dpc_flags); 3568 &base_vha->dpc_flags);
3402 } 3569 }
3403 3570
3404 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 3571 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
3405 base_vha->host_no)); 3572 "ISP abort end.\n");
3406 } 3573 }
3407 3574
3408 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) { 3575 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
@@ -3411,9 +3578,8 @@ qla2x00_do_dpc(void *data)
3411 } 3578 }
3412 3579
3413 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 3580 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3414 DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched " 3581 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3415 "qla2x00_quiesce_needed ha = %p\n", 3582 "Quiescence mode scheduled.\n");
3416 base_vha->host_no, ha));
3417 qla82xx_device_state_handler(base_vha); 3583 qla82xx_device_state_handler(base_vha);
3418 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); 3584 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
3419 if (!ha->flags.quiesce_owner) { 3585 if (!ha->flags.quiesce_owner) {
@@ -3423,17 +3589,20 @@ qla2x00_do_dpc(void *data)
3423 qla82xx_clear_qsnt_ready(base_vha); 3589 qla82xx_clear_qsnt_ready(base_vha);
3424 qla82xx_idc_unlock(ha); 3590 qla82xx_idc_unlock(ha);
3425 } 3591 }
3592 ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
3593 "Quiescence mode end.\n");
3426 } 3594 }
3427 3595
3428 if (test_and_clear_bit(RESET_MARKER_NEEDED, 3596 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3429 &base_vha->dpc_flags) && 3597 &base_vha->dpc_flags) &&
3430 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 3598 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
3431 3599
3432 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", 3600 ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
3433 base_vha->host_no)); 3601 "Reset marker scheduled.\n");
3434
3435 qla2x00_rst_aen(base_vha); 3602 qla2x00_rst_aen(base_vha);
3436 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 3603 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
3604 ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
3605 "Reset marker end.\n");
3437 } 3606 }
3438 3607
3439 /* Retry each device up to login retry count */ 3608 /* Retry each device up to login retry count */
@@ -3442,19 +3611,18 @@ qla2x00_do_dpc(void *data)
3442 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 3611 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3443 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 3612 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
3444 3613
3445 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 3614 ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
3446 base_vha->host_no)); 3615 "Relogin scheduled.\n");
3447 qla2x00_relogin(base_vha); 3616 qla2x00_relogin(base_vha);
3448 3617 ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
3449 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 3618 "Relogin end.\n");
3450 base_vha->host_no));
3451 } 3619 }
3452 3620
3453 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 3621 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3454 &base_vha->dpc_flags)) { 3622 &base_vha->dpc_flags)) {
3455 3623
3456 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 3624 ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
3457 base_vha->host_no)); 3625 "Loop resync scheduled.\n");
3458 3626
3459 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 3627 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
3460 &base_vha->dpc_flags))) { 3628 &base_vha->dpc_flags))) {
@@ -3465,8 +3633,8 @@ qla2x00_do_dpc(void *data)
3465 &base_vha->dpc_flags); 3633 &base_vha->dpc_flags);
3466 } 3634 }
3467 3635
3468 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 3636 ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
3469 base_vha->host_no)); 3637 "Loop resync end.\n");
3470 } 3638 }
3471 3639
3472 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 3640 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
@@ -3489,7 +3657,8 @@ qla2x00_do_dpc(void *data)
3489 } /* End of while(1) */ 3657 } /* End of while(1) */
3490 __set_current_state(TASK_RUNNING); 3658 __set_current_state(TASK_RUNNING);
3491 3659
3492 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); 3660 ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
3661 "DPC handler exiting.\n");
3493 3662
3494 /* 3663 /*
3495 * Make sure that nobody tries to wake us up again. 3664 * Make sure that nobody tries to wake us up again.
@@ -3596,9 +3765,11 @@ void
3596qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp) 3765qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3597{ 3766{
3598 if (atomic_read(&sp->ref_count) == 0) { 3767 if (atomic_read(&sp->ref_count) == 0) {
3599 DEBUG2(qla_printk(KERN_WARNING, ha, 3768 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
3600 "SP reference-count to ZERO -- sp=%p\n", sp)); 3769 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
3601 DEBUG2(BUG()); 3770 sp, sp->cmd);
3771 if (ql2xextended_error_logging & ql_dbg_io)
3772 BUG();
3602 return; 3773 return;
3603 } 3774 }
3604 if (!atomic_dec_and_test(&sp->ref_count)) 3775 if (!atomic_dec_and_test(&sp->ref_count))
@@ -3626,6 +3797,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3626 struct req_que *req; 3797 struct req_que *req;
3627 3798
3628 if (ha->flags.eeh_busy) { 3799 if (ha->flags.eeh_busy) {
3800 ql_dbg(ql_dbg_timer, vha, 0x6000,
3801 "EEH = %d, restarting timer.\n",
3802 ha->flags.eeh_busy);
3629 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3803 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3630 return; 3804 return;
3631 } 3805 }
@@ -3650,9 +3824,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3650 if (atomic_read(&vha->loop_down_timer) == 3824 if (atomic_read(&vha->loop_down_timer) ==
3651 vha->loop_down_abort_time) { 3825 vha->loop_down_abort_time) {
3652 3826
3653 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 3827 ql_log(ql_log_info, vha, 0x6008,
3654 "queues before time expire\n", 3828 "Loop down - aborting the queues before time expires.\n");
3655 vha->host_no));
3656 3829
3657 if (!IS_QLA2100(ha) && vha->link_down_timeout) 3830 if (!IS_QLA2100(ha) && vha->link_down_timeout)
3658 atomic_set(&vha->loop_state, LOOP_DEAD); 3831 atomic_set(&vha->loop_state, LOOP_DEAD);
@@ -3697,10 +3870,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3697 /* if the loop has been down for 4 minutes, reinit adapter */ 3870 /* if the loop has been down for 4 minutes, reinit adapter */
3698 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 3871 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
3699 if (!(vha->device_flags & DFLG_NO_CABLE)) { 3872 if (!(vha->device_flags & DFLG_NO_CABLE)) {
3700 DEBUG(printk("scsi(%ld): Loop down - " 3873 ql_log(ql_log_warn, vha, 0x6009,
3701 "aborting ISP.\n",
3702 vha->host_no));
3703 qla_printk(KERN_WARNING, ha,
3704 "Loop down - aborting ISP.\n"); 3874 "Loop down - aborting ISP.\n");
3705 3875
3706 if (IS_QLA82XX(ha)) 3876 if (IS_QLA82XX(ha))
@@ -3711,9 +3881,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3711 &vha->dpc_flags); 3881 &vha->dpc_flags);
3712 } 3882 }
3713 } 3883 }
3714 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 3884 ql_dbg(ql_dbg_timer, vha, 0x600a,
3715 vha->host_no, 3885 "Loop down - seconds remaining %d.\n",
3716 atomic_read(&vha->loop_down_timer))); 3886 atomic_read(&vha->loop_down_timer));
3717 } 3887 }
3718 3888
3719 /* Check if beacon LED needs to be blinked for physical host only */ 3889 /* Check if beacon LED needs to be blinked for physical host only */
@@ -3736,8 +3906,27 @@ qla2x00_timer(scsi_qla_host_t *vha)
3736 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 3906 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3737 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 3907 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3738 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 3908 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3739 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) 3909 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
3910 ql_dbg(ql_dbg_timer, vha, 0x600b,
3911 "isp_abort_needed=%d loop_resync_needed=%d "
3912 "fcport_update_needed=%d start_dpc=%d "
3913 "reset_marker_needed=%d",
3914 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
3915 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
3916 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
3917 start_dpc,
3918 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
3919 ql_dbg(ql_dbg_timer, vha, 0x600c,
3920 "beacon_blink_needed=%d isp_unrecoverable=%d "
3921 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
3922 "relogin_needed=%d.\n",
3923 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
3924 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
3925 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
3926 test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
3927 test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
3740 qla2xxx_wake_dpc(vha); 3928 qla2xxx_wake_dpc(vha);
3929 }
3741 3930
3742 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3931 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3743} 3932}
@@ -3806,8 +3995,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
3806 goto out; 3995 goto out;
3807 3996
3808 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 3997 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3809 DEBUG2(printk("scsi(%ld): Failed to load firmware image " 3998 ql_log(ql_log_warn, vha, 0x0063,
3810 "(%s).\n", vha->host_no, blob->name)); 3999 "Failed to load firmware image (%s).\n", blob->name);
3811 blob->fw = NULL; 4000 blob->fw = NULL;
3812 blob = NULL; 4001 blob = NULL;
3813 goto out; 4002 goto out;
@@ -3836,8 +4025,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3836 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 4025 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3837 struct qla_hw_data *ha = vha->hw; 4026 struct qla_hw_data *ha = vha->hw;
3838 4027
3839 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n", 4028 ql_dbg(ql_dbg_aer, vha, 0x9000,
3840 state)); 4029 "PCI error detected, state %x.\n", state);
3841 4030
3842 switch (state) { 4031 switch (state) {
3843 case pci_channel_io_normal: 4032 case pci_channel_io_normal:
@@ -3850,9 +4039,9 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3850 ha->flags.isp82xx_fw_hung = 1; 4039 ha->flags.isp82xx_fw_hung = 1;
3851 if (ha->flags.mbox_busy) { 4040 if (ha->flags.mbox_busy) {
3852 ha->flags.mbox_int = 1; 4041 ha->flags.mbox_int = 1;
3853 DEBUG2(qla_printk(KERN_ERR, ha, 4042 ql_dbg(ql_dbg_aer, vha, 0x9001,
3854 "Due to pci channel io frozen, doing premature " 4043 "Due to pci channel io frozen, doing premature "
3855 "completion of mbx command\n")); 4044 "completion of mbx command.\n");
3856 complete(&ha->mbx_intr_comp); 4045 complete(&ha->mbx_intr_comp);
3857 } 4046 }
3858 } 4047 }
@@ -3900,8 +4089,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3900 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4089 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3901 4090
3902 if (risc_paused) { 4091 if (risc_paused) {
3903 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 4092 ql_log(ql_log_info, base_vha, 0x9003,
3904 "Dumping firmware!\n"); 4093 "RISC paused -- mmio_enabled, Dumping firmware.\n");
3905 ha->isp_ops->fw_dump(base_vha, 0); 4094 ha->isp_ops->fw_dump(base_vha, 0);
3906 4095
3907 return PCI_ERS_RESULT_NEED_RESET; 4096 return PCI_ERS_RESULT_NEED_RESET;
@@ -3917,8 +4106,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3917 int fn; 4106 int fn;
3918 struct pci_dev *other_pdev = NULL; 4107 struct pci_dev *other_pdev = NULL;
3919 4108
3920 DEBUG17(qla_printk(KERN_INFO, ha, 4109 ql_dbg(ql_dbg_aer, base_vha, 0x9006,
3921 "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no)); 4110 "Entered %s.\n", __func__);
3922 4111
3923 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 4112 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3924 4113
@@ -3932,8 +4121,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3932 fn = PCI_FUNC(ha->pdev->devfn); 4121 fn = PCI_FUNC(ha->pdev->devfn);
3933 while (fn > 0) { 4122 while (fn > 0) {
3934 fn--; 4123 fn--;
3935 DEBUG17(qla_printk(KERN_INFO, ha, 4124 ql_dbg(ql_dbg_aer, base_vha, 0x9007,
3936 "Finding pci device at function = 0x%x\n", fn)); 4125 "Finding pci device at function = 0x%x.\n", fn);
3937 other_pdev = 4126 other_pdev =
3938 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 4127 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3939 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 4128 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
@@ -3942,9 +4131,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3942 if (!other_pdev) 4131 if (!other_pdev)
3943 continue; 4132 continue;
3944 if (atomic_read(&other_pdev->enable_cnt)) { 4133 if (atomic_read(&other_pdev->enable_cnt)) {
3945 DEBUG17(qla_printk(KERN_INFO, ha, 4134 ql_dbg(ql_dbg_aer, base_vha, 0x9008,
3946 "Found PCI func available and enabled at 0x%x\n", 4135 "Found PCI func available and enable at 0x%x.\n",
3947 fn)); 4136 fn);
3948 pci_dev_put(other_pdev); 4137 pci_dev_put(other_pdev);
3949 break; 4138 break;
3950 } 4139 }
@@ -3953,8 +4142,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3953 4142
3954 if (!fn) { 4143 if (!fn) {
3955 /* Reset owner */ 4144 /* Reset owner */
3956 DEBUG17(qla_printk(KERN_INFO, ha, 4145 ql_dbg(ql_dbg_aer, base_vha, 0x9009,
3957 "This devfn is reset owner = 0x%x\n", ha->pdev->devfn)); 4146 "This devfn is reset owner = 0x%x.\n",
4147 ha->pdev->devfn);
3958 qla82xx_idc_lock(ha); 4148 qla82xx_idc_lock(ha);
3959 4149
3960 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4150 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
@@ -3964,8 +4154,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3964 QLA82XX_IDC_VERSION); 4154 QLA82XX_IDC_VERSION);
3965 4155
3966 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 4156 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3967 DEBUG17(qla_printk(KERN_INFO, ha, 4157 ql_dbg(ql_dbg_aer, base_vha, 0x900a,
3968 "drv_active = 0x%x\n", drv_active)); 4158 "drv_active = 0x%x.\n", drv_active);
3969 4159
3970 qla82xx_idc_unlock(ha); 4160 qla82xx_idc_unlock(ha);
3971 /* Reset if device is not already reset 4161 /* Reset if device is not already reset
@@ -3978,12 +4168,14 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3978 qla82xx_idc_lock(ha); 4168 qla82xx_idc_lock(ha);
3979 4169
3980 if (rval != QLA_SUCCESS) { 4170 if (rval != QLA_SUCCESS) {
3981 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 4171 ql_log(ql_log_info, base_vha, 0x900b,
4172 "HW State: FAILED.\n");
3982 qla82xx_clear_drv_active(ha); 4173 qla82xx_clear_drv_active(ha);
3983 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4174 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3984 QLA82XX_DEV_FAILED); 4175 QLA82XX_DEV_FAILED);
3985 } else { 4176 } else {
3986 qla_printk(KERN_INFO, ha, "HW State: READY\n"); 4177 ql_log(ql_log_info, base_vha, 0x900c,
4178 "HW State: READY.\n");
3987 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4179 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3988 QLA82XX_DEV_READY); 4180 QLA82XX_DEV_READY);
3989 qla82xx_idc_unlock(ha); 4181 qla82xx_idc_unlock(ha);
@@ -3996,8 +4188,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3996 } 4188 }
3997 qla82xx_idc_unlock(ha); 4189 qla82xx_idc_unlock(ha);
3998 } else { 4190 } else {
3999 DEBUG17(qla_printk(KERN_INFO, ha, 4191 ql_dbg(ql_dbg_aer, base_vha, 0x900d,
4000 "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn)); 4192 "This devfn is not reset owner = 0x%x.\n",
4193 ha->pdev->devfn);
4001 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 4194 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4002 QLA82XX_DEV_READY)) { 4195 QLA82XX_DEV_READY)) {
4003 ha->flags.isp82xx_fw_hung = 0; 4196 ha->flags.isp82xx_fw_hung = 0;
@@ -4021,7 +4214,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4021 struct rsp_que *rsp; 4214 struct rsp_que *rsp;
4022 int rc, retries = 10; 4215 int rc, retries = 10;
4023 4216
4024 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); 4217 ql_dbg(ql_dbg_aer, base_vha, 0x9004,
4218 "Slot Reset.\n");
4025 4219
4026 /* Workaround: qla2xxx driver which access hardware earlier 4220 /* Workaround: qla2xxx driver which access hardware earlier
4027 * needs error state to be pci_channel_io_online. 4221 * needs error state to be pci_channel_io_online.
@@ -4042,7 +4236,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4042 rc = pci_enable_device(pdev); 4236 rc = pci_enable_device(pdev);
4043 4237
4044 if (rc) { 4238 if (rc) {
4045 qla_printk(KERN_WARNING, ha, 4239 ql_log(ql_log_warn, base_vha, 0x9005,
4046 "Can't re-enable PCI device after reset.\n"); 4240 "Can't re-enable PCI device after reset.\n");
4047 goto exit_slot_reset; 4241 goto exit_slot_reset;
4048 } 4242 }
@@ -4072,8 +4266,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4072 4266
4073 4267
4074exit_slot_reset: 4268exit_slot_reset:
4075 DEBUG17(qla_printk(KERN_WARNING, ha, 4269 ql_dbg(ql_dbg_aer, base_vha, 0x900e,
4076 "slot_reset-return:ret=%x\n", ret)); 4270 "slot_reset return %x.\n", ret);
4077 4271
4078 return ret; 4272 return ret;
4079} 4273}
@@ -4085,13 +4279,13 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
4085 struct qla_hw_data *ha = base_vha->hw; 4279 struct qla_hw_data *ha = base_vha->hw;
4086 int ret; 4280 int ret;
4087 4281
4088 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n")); 4282 ql_dbg(ql_dbg_aer, base_vha, 0x900f,
4283 "pci_resume.\n");
4089 4284
4090 ret = qla2x00_wait_for_hba_online(base_vha); 4285 ret = qla2x00_wait_for_hba_online(base_vha);
4091 if (ret != QLA_SUCCESS) { 4286 if (ret != QLA_SUCCESS) {
4092 qla_printk(KERN_ERR, ha, 4287 ql_log(ql_log_fatal, base_vha, 0x9002,
4093 "the device failed to resume I/O " 4288 "The device failed to resume I/O from slot/link_reset.\n");
4094 "from slot/link_reset");
4095 } 4289 }
4096 4290
4097 pci_cleanup_aer_uncorrect_error_status(pdev); 4291 pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -4155,8 +4349,8 @@ qla2x00_module_init(void)
4155 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 4349 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
4156 SLAB_HWCACHE_ALIGN, NULL); 4350 SLAB_HWCACHE_ALIGN, NULL);
4157 if (srb_cachep == NULL) { 4351 if (srb_cachep == NULL) {
4158 printk(KERN_ERR 4352 ql_log(ql_log_fatal, NULL, 0x0001,
4159 "qla2xxx: Unable to allocate SRB cache...Failing load!\n"); 4353 "Unable to allocate SRB cache...Failing load!.\n");
4160 return -ENOMEM; 4354 return -ENOMEM;
4161 } 4355 }
4162 4356
@@ -4169,13 +4363,15 @@ qla2x00_module_init(void)
4169 fc_attach_transport(&qla2xxx_transport_functions); 4363 fc_attach_transport(&qla2xxx_transport_functions);
4170 if (!qla2xxx_transport_template) { 4364 if (!qla2xxx_transport_template) {
4171 kmem_cache_destroy(srb_cachep); 4365 kmem_cache_destroy(srb_cachep);
4366 ql_log(ql_log_fatal, NULL, 0x0002,
4367 "fc_attach_transport failed...Failing load!.\n");
4172 return -ENODEV; 4368 return -ENODEV;
4173 } 4369 }
4174 4370
4175 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 4371 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
4176 if (apidev_major < 0) { 4372 if (apidev_major < 0) {
4177 printk(KERN_WARNING "qla2xxx: Unable to register char device " 4373 ql_log(ql_log_fatal, NULL, 0x0003,
4178 "%s\n", QLA2XXX_APIDEV); 4374 "Unable to register char device %s.\n", QLA2XXX_APIDEV);
4179 } 4375 }
4180 4376
4181 qla2xxx_transport_vport_template = 4377 qla2xxx_transport_vport_template =
@@ -4183,16 +4379,21 @@ qla2x00_module_init(void)
4183 if (!qla2xxx_transport_vport_template) { 4379 if (!qla2xxx_transport_vport_template) {
4184 kmem_cache_destroy(srb_cachep); 4380 kmem_cache_destroy(srb_cachep);
4185 fc_release_transport(qla2xxx_transport_template); 4381 fc_release_transport(qla2xxx_transport_template);
4382 ql_log(ql_log_fatal, NULL, 0x0004,
4383 "fc_attach_transport vport failed...Failing load!.\n");
4186 return -ENODEV; 4384 return -ENODEV;
4187 } 4385 }
4188 4386 ql_log(ql_log_info, NULL, 0x0005,
4189 printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n", 4387 "QLogic Fibre Channel HBA Driver: %s.\n",
4190 qla2x00_version_str); 4388 qla2x00_version_str);
4191 ret = pci_register_driver(&qla2xxx_pci_driver); 4389 ret = pci_register_driver(&qla2xxx_pci_driver);
4192 if (ret) { 4390 if (ret) {
4193 kmem_cache_destroy(srb_cachep); 4391 kmem_cache_destroy(srb_cachep);
4194 fc_release_transport(qla2xxx_transport_template); 4392 fc_release_transport(qla2xxx_transport_template);
4195 fc_release_transport(qla2xxx_transport_vport_template); 4393 fc_release_transport(qla2xxx_transport_vport_template);
4394 ql_log(ql_log_fatal, NULL, 0x0006,
4395 "pci_register_driver failed...ret=%d Failing load!.\n",
4396 ret);
4196 } 4397 }
4197 return ret; 4398 return ret;
4198} 4399}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 693647661ed1..eff13563c82d 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -189,6 +189,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
189 uint16_t word; 189 uint16_t word;
190 uint32_t nv_cmd, wait_cnt; 190 uint32_t nv_cmd, wait_cnt;
191 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 191 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
192 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
192 193
193 qla2x00_nv_write(ha, NVR_DATA_OUT); 194 qla2x00_nv_write(ha, NVR_DATA_OUT);
194 qla2x00_nv_write(ha, 0); 195 qla2x00_nv_write(ha, 0);
@@ -220,8 +221,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
220 wait_cnt = NVR_WAIT_CNT; 221 wait_cnt = NVR_WAIT_CNT;
221 do { 222 do {
222 if (!--wait_cnt) { 223 if (!--wait_cnt) {
223 DEBUG9_10(qla_printk(KERN_WARNING, ha, 224 ql_dbg(ql_dbg_user, vha, 0x708d,
224 "NVRAM didn't go ready...\n")); 225 "NVRAM didn't go ready...\n");
225 break; 226 break;
226 } 227 }
227 NVRAM_DELAY(); 228 NVRAM_DELAY();
@@ -308,6 +309,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
308 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 309 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
309 uint32_t word, wait_cnt; 310 uint32_t word, wait_cnt;
310 uint16_t wprot, wprot_old; 311 uint16_t wprot, wprot_old;
312 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
311 313
312 /* Clear NVRAM write protection. */ 314 /* Clear NVRAM write protection. */
313 ret = QLA_FUNCTION_FAILED; 315 ret = QLA_FUNCTION_FAILED;
@@ -350,8 +352,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
350 wait_cnt = NVR_WAIT_CNT; 352 wait_cnt = NVR_WAIT_CNT;
351 do { 353 do {
352 if (!--wait_cnt) { 354 if (!--wait_cnt) {
353 DEBUG9_10(qla_printk(KERN_WARNING, ha, 355 ql_dbg(ql_dbg_user, vha, 0x708e,
354 "NVRAM didn't go ready...\n")); 356 "NVRAM didn't go ready...\n");
355 break; 357 break;
356 } 358 }
357 NVRAM_DELAY(); 359 NVRAM_DELAY();
@@ -371,6 +373,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
371{ 373{
372 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 374 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
373 uint32_t word, wait_cnt; 375 uint32_t word, wait_cnt;
376 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
374 377
375 if (stat != QLA_SUCCESS) 378 if (stat != QLA_SUCCESS)
376 return; 379 return;
@@ -409,8 +412,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
409 wait_cnt = NVR_WAIT_CNT; 412 wait_cnt = NVR_WAIT_CNT;
410 do { 413 do {
411 if (!--wait_cnt) { 414 if (!--wait_cnt) {
412 DEBUG9_10(qla_printk(KERN_WARNING, ha, 415 ql_dbg(ql_dbg_user, vha, 0x708f,
413 "NVRAM didn't go ready...\n")); 416 "NVRAM didn't go ready...\n");
414 break; 417 break;
415 } 418 }
416 NVRAM_DELAY(); 419 NVRAM_DELAY();
@@ -607,9 +610,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
607 for (chksum = 0; cnt; cnt--) 610 for (chksum = 0; cnt; cnt--)
608 chksum += le16_to_cpu(*wptr++); 611 chksum += le16_to_cpu(*wptr++);
609 if (chksum) { 612 if (chksum) {
610 qla_printk(KERN_ERR, ha, 613 ql_log(ql_log_fatal, vha, 0x0045,
611 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); 614 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
612 qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location)); 615 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
616 buf, sizeof(struct qla_flt_location));
613 return QLA_FUNCTION_FAILED; 617 return QLA_FUNCTION_FAILED;
614 } 618 }
615 619
@@ -618,7 +622,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
618 *start = (le16_to_cpu(fltl->start_hi) << 16 | 622 *start = (le16_to_cpu(fltl->start_hi) << 16 |
619 le16_to_cpu(fltl->start_lo)) >> 2; 623 le16_to_cpu(fltl->start_lo)) >> 2;
620end: 624end:
621 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start)); 625 ql_dbg(ql_dbg_init, vha, 0x0046,
626 "FLTL[%s] = 0x%x.\n",
627 loc, *start);
622 return QLA_SUCCESS; 628 return QLA_SUCCESS;
623} 629}
624 630
@@ -685,10 +691,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
685 if (*wptr == __constant_cpu_to_le16(0xffff)) 691 if (*wptr == __constant_cpu_to_le16(0xffff))
686 goto no_flash_data; 692 goto no_flash_data;
687 if (flt->version != __constant_cpu_to_le16(1)) { 693 if (flt->version != __constant_cpu_to_le16(1)) {
688 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: " 694 ql_log(ql_log_warn, vha, 0x0047,
689 "version=0x%x length=0x%x checksum=0x%x.\n", 695 "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
690 le16_to_cpu(flt->version), le16_to_cpu(flt->length), 696 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
691 le16_to_cpu(flt->checksum))); 697 le16_to_cpu(flt->checksum));
692 goto no_flash_data; 698 goto no_flash_data;
693 } 699 }
694 700
@@ -696,10 +702,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
696 for (chksum = 0; cnt; cnt--) 702 for (chksum = 0; cnt; cnt--)
697 chksum += le16_to_cpu(*wptr++); 703 chksum += le16_to_cpu(*wptr++);
698 if (chksum) { 704 if (chksum) {
699 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: " 705 ql_log(ql_log_fatal, vha, 0x0048,
700 "version=0x%x length=0x%x checksum=0x%x.\n", 706 "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
701 le16_to_cpu(flt->version), le16_to_cpu(flt->length), 707 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
702 chksum)); 708 le16_to_cpu(flt->checksum));
703 goto no_flash_data; 709 goto no_flash_data;
704 } 710 }
705 711
@@ -708,10 +714,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
708 for ( ; cnt; cnt--, region++) { 714 for ( ; cnt; cnt--, region++) {
709 /* Store addresses as DWORD offsets. */ 715 /* Store addresses as DWORD offsets. */
710 start = le32_to_cpu(region->start) >> 2; 716 start = le32_to_cpu(region->start) >> 2;
711 717 ql_dbg(ql_dbg_init, vha, 0x0049,
712 DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x " 718 "FLT[%02x]: start=0x%x "
713 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, 719 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
714 le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); 720 start, le32_to_cpu(region->end) >> 2,
721 le32_to_cpu(region->size));
715 722
716 switch (le32_to_cpu(region->code) & 0xff) { 723 switch (le32_to_cpu(region->code) & 0xff) {
717 case FLT_REG_FW: 724 case FLT_REG_FW:
@@ -796,12 +803,16 @@ no_flash_data:
796 ha->flt_region_npiv_conf = ha->flags.port0 ? 803 ha->flt_region_npiv_conf = ha->flags.port0 ?
797 def_npiv_conf0[def] : def_npiv_conf1[def]; 804 def_npiv_conf0[def] : def_npiv_conf1[def];
798done: 805done:
799 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 806 ql_dbg(ql_dbg_init, vha, 0x004a,
800 "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x " 807 "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
801 "npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot, 808 loc, ha->flt_region_boot,
802 ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd, 809 ha->flt_region_fw, ha->flt_region_vpd_nvram,
803 ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt, 810 ha->flt_region_vpd);
804 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio)); 811 ql_dbg(ql_dbg_init, vha, 0x004b,
812 "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
813 ha->flt_region_nvram,
814 ha->flt_region_fdt, ha->flt_region_flt,
815 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
805} 816}
806 817
807static void 818static void
@@ -833,10 +844,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
833 cnt++) 844 cnt++)
834 chksum += le16_to_cpu(*wptr++); 845 chksum += le16_to_cpu(*wptr++);
835 if (chksum) { 846 if (chksum) {
836 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: " 847 ql_dbg(ql_dbg_init, vha, 0x004c,
837 "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0], 848 "Inconsistent FDT detected:"
838 le16_to_cpu(fdt->version))); 849 " checksum=0x%x id=%c version0x%x.\n", chksum,
839 DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt))); 850 fdt->sig[0], le16_to_cpu(fdt->version));
851 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
852 (uint8_t *)fdt, sizeof(*fdt));
840 goto no_flash_data; 853 goto no_flash_data;
841 } 854 }
842 855
@@ -890,11 +903,12 @@ no_flash_data:
890 break; 903 break;
891 } 904 }
892done: 905done:
893 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x " 906 ql_dbg(ql_dbg_init, vha, 0x004d,
894 "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, 907 "FDT[%x]: (0x%x/0x%x) erase=0x%x "
908 "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
895 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, 909 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
896 ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable, 910 ha->fdt_wrt_disable, ha->fdt_block_size);
897 ha->fdt_block_size)); 911
898} 912}
899 913
900static void 914static void
@@ -919,6 +933,10 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
919 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++); 933 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
920 ha->nx_reset_timeout = le32_to_cpu(*wptr); 934 ha->nx_reset_timeout = le32_to_cpu(*wptr);
921 } 935 }
936 ql_dbg(ql_dbg_init, vha, 0x004e,
937 "nx_dev_init_timeout=%d "
938 "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
939 ha->nx_reset_timeout);
922 return; 940 return;
923} 941}
924 942
@@ -963,17 +981,18 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
963 if (hdr.version == __constant_cpu_to_le16(0xffff)) 981 if (hdr.version == __constant_cpu_to_le16(0xffff))
964 return; 982 return;
965 if (hdr.version != __constant_cpu_to_le16(1)) { 983 if (hdr.version != __constant_cpu_to_le16(1)) {
966 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config " 984 ql_dbg(ql_dbg_user, vha, 0x7090,
985 "Unsupported NPIV-Config "
967 "detected: version=0x%x entries=0x%x checksum=0x%x.\n", 986 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
968 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), 987 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
969 le16_to_cpu(hdr.checksum))); 988 le16_to_cpu(hdr.checksum));
970 return; 989 return;
971 } 990 }
972 991
973 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL); 992 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
974 if (!data) { 993 if (!data) {
975 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to " 994 ql_log(ql_log_warn, vha, 0x7091,
976 "allocate memory.\n")); 995 "Unable to allocate memory for data.\n");
977 return; 996 return;
978 } 997 }
979 998
@@ -985,10 +1004,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
985 for (wptr = data, chksum = 0; cnt; cnt--) 1004 for (wptr = data, chksum = 0; cnt; cnt--)
986 chksum += le16_to_cpu(*wptr++); 1005 chksum += le16_to_cpu(*wptr++);
987 if (chksum) { 1006 if (chksum) {
988 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config " 1007 ql_dbg(ql_dbg_user, vha, 0x7092,
1008 "Inconsistent NPIV-Config "
989 "detected: version=0x%x entries=0x%x checksum=0x%x.\n", 1009 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
990 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), 1010 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
991 chksum)); 1011 le16_to_cpu(hdr.checksum));
992 goto done; 1012 goto done;
993 } 1013 }
994 1014
@@ -1014,21 +1034,22 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
1014 vid.port_name = wwn_to_u64(entry->port_name); 1034 vid.port_name = wwn_to_u64(entry->port_name);
1015 vid.node_name = wwn_to_u64(entry->node_name); 1035 vid.node_name = wwn_to_u64(entry->node_name);
1016 1036
1017 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " 1037 ql_dbg(ql_dbg_user, vha, 0x7093,
1018 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 1038 "NPIV[%02x]: wwpn=%llx "
1019 (unsigned long long)vid.port_name, 1039 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
1020 (unsigned long long)vid.node_name, 1040 (unsigned long long)vid.port_name,
1021 le16_to_cpu(entry->vf_id), 1041 (unsigned long long)vid.node_name,
1022 entry->q_qos, entry->f_qos)); 1042 le16_to_cpu(entry->vf_id),
1043 entry->q_qos, entry->f_qos);
1023 1044
1024 if (i < QLA_PRECONFIG_VPORTS) { 1045 if (i < QLA_PRECONFIG_VPORTS) {
1025 vport = fc_vport_create(vha->host, 0, &vid); 1046 vport = fc_vport_create(vha->host, 0, &vid);
1026 if (!vport) 1047 if (!vport)
1027 qla_printk(KERN_INFO, ha, 1048 ql_log(ql_log_warn, vha, 0x7094,
1028 "NPIV-Config: Failed to create vport [%02x]: " 1049 "NPIV-Config Failed to create vport [%02x]: "
1029 "wwpn=%llx wwnn=%llx.\n", cnt, 1050 "wwpn=%llx wwnn=%llx.\n", cnt,
1030 (unsigned long long)vid.port_name, 1051 (unsigned long long)vid.port_name,
1031 (unsigned long long)vid.node_name); 1052 (unsigned long long)vid.node_name);
1032 } 1053 }
1033 } 1054 }
1034done: 1055done:
@@ -1127,9 +1148,10 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1127 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 1148 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
1128 &optrom_dma, GFP_KERNEL); 1149 &optrom_dma, GFP_KERNEL);
1129 if (!optrom) { 1150 if (!optrom) {
1130 qla_printk(KERN_DEBUG, ha, 1151 ql_log(ql_log_warn, vha, 0x7095,
1131 "Unable to allocate memory for optrom burst write " 1152 "Unable to allocate "
1132 "(%x KB).\n", OPTROM_BURST_SIZE / 1024); 1153 "memory for optrom burst write (%x KB).\n",
1154 OPTROM_BURST_SIZE / 1024);
1133 } 1155 }
1134 } 1156 }
1135 1157
@@ -1138,7 +1160,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1138 1160
1139 ret = qla24xx_unprotect_flash(vha); 1161 ret = qla24xx_unprotect_flash(vha);
1140 if (ret != QLA_SUCCESS) { 1162 if (ret != QLA_SUCCESS) {
1141 qla_printk(KERN_WARNING, ha, 1163 ql_log(ql_log_warn, vha, 0x7096,
1142 "Unable to unprotect flash for update.\n"); 1164 "Unable to unprotect flash for update.\n");
1143 goto done; 1165 goto done;
1144 } 1166 }
@@ -1156,9 +1178,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1156 0xff0000) | ((fdata >> 16) & 0xff)); 1178 0xff0000) | ((fdata >> 16) & 0xff));
1157 ret = qla24xx_erase_sector(vha, fdata); 1179 ret = qla24xx_erase_sector(vha, fdata);
1158 if (ret != QLA_SUCCESS) { 1180 if (ret != QLA_SUCCESS) {
1159 DEBUG9(qla_printk(KERN_WARNING, ha, 1181 ql_dbg(ql_dbg_user, vha, 0x7007,
1160 "Unable to erase sector: address=%x.\n", 1182 "Unable to erase erase sector: address=%x.\n",
1161 faddr)); 1183 faddr);
1162 break; 1184 break;
1163 } 1185 }
1164 } 1186 }
@@ -1172,12 +1194,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1172 flash_data_addr(ha, faddr), 1194 flash_data_addr(ha, faddr),
1173 OPTROM_BURST_DWORDS); 1195 OPTROM_BURST_DWORDS);
1174 if (ret != QLA_SUCCESS) { 1196 if (ret != QLA_SUCCESS) {
1175 qla_printk(KERN_WARNING, ha, 1197 ql_log(ql_log_warn, vha, 0x7097,
1176 "Unable to burst-write optrom segment " 1198 "Unable to burst-write optrom segment "
1177 "(%x/%x/%llx).\n", ret, 1199 "(%x/%x/%llx).\n", ret,
1178 flash_data_addr(ha, faddr), 1200 flash_data_addr(ha, faddr),
1179 (unsigned long long)optrom_dma); 1201 (unsigned long long)optrom_dma);
1180 qla_printk(KERN_WARNING, ha, 1202 ql_log(ql_log_warn, vha, 0x7098,
1181 "Reverting to slow-write.\n"); 1203 "Reverting to slow-write.\n");
1182 1204
1183 dma_free_coherent(&ha->pdev->dev, 1205 dma_free_coherent(&ha->pdev->dev,
@@ -1194,9 +1216,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1194 ret = qla24xx_write_flash_dword(ha, 1216 ret = qla24xx_write_flash_dword(ha,
1195 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr)); 1217 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
1196 if (ret != QLA_SUCCESS) { 1218 if (ret != QLA_SUCCESS) {
1197 DEBUG9(printk("%s(%ld) Unable to program flash " 1219 ql_dbg(ql_dbg_user, vha, 0x7006,
1198 "address=%x data=%x.\n", __func__, 1220 "Unable to program flash address=%x data=%x.\n",
1199 vha->host_no, faddr, *dwptr)); 1221 faddr, *dwptr);
1200 break; 1222 break;
1201 } 1223 }
1202 1224
@@ -1211,7 +1233,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1211 1233
1212 ret = qla24xx_protect_flash(vha); 1234 ret = qla24xx_protect_flash(vha);
1213 if (ret != QLA_SUCCESS) 1235 if (ret != QLA_SUCCESS)
1214 qla_printk(KERN_WARNING, ha, 1236 ql_log(ql_log_warn, vha, 0x7099,
1215 "Unable to protect flash after update.\n"); 1237 "Unable to protect flash after update.\n");
1216done: 1238done:
1217 if (optrom) 1239 if (optrom)
@@ -1324,9 +1346,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1324 ret = qla24xx_write_flash_dword(ha, 1346 ret = qla24xx_write_flash_dword(ha,
1325 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); 1347 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1326 if (ret != QLA_SUCCESS) { 1348 if (ret != QLA_SUCCESS) {
1327 DEBUG9(qla_printk(KERN_WARNING, ha, 1349 ql_dbg(ql_dbg_user, vha, 0x709a,
1328 "Unable to program nvram address=%x data=%x.\n", 1350 "Unable to program nvram address=%x data=%x.\n",
1329 naddr, *dwptr)); 1351 naddr, *dwptr);
1330 break; 1352 break;
1331 } 1353 }
1332 } 1354 }
@@ -1476,7 +1498,7 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
1476 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; 1498 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
1477 1499
1478 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1500 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1479 qla_printk(KERN_WARNING, ha, 1501 ql_log(ql_log_warn, vha, 0x709b,
1480 "Unable to update fw options (beacon on).\n"); 1502 "Unable to update fw options (beacon on).\n");
1481 return QLA_FUNCTION_FAILED; 1503 return QLA_FUNCTION_FAILED;
1482 } 1504 }
@@ -1541,7 +1563,7 @@ qla2x00_beacon_off(struct scsi_qla_host *vha)
1541 1563
1542 rval = qla2x00_set_fw_options(vha, ha->fw_options); 1564 rval = qla2x00_set_fw_options(vha, ha->fw_options);
1543 if (rval != QLA_SUCCESS) 1565 if (rval != QLA_SUCCESS)
1544 qla_printk(KERN_WARNING, ha, 1566 ql_log(ql_log_warn, vha, 0x709c,
1545 "Unable to update fw options (beacon off).\n"); 1567 "Unable to update fw options (beacon off).\n");
1546 return rval; 1568 return rval;
1547} 1569}
@@ -1616,7 +1638,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
1616 1638
1617 if (qla2x00_get_fw_options(vha, ha->fw_options) != 1639 if (qla2x00_get_fw_options(vha, ha->fw_options) !=
1618 QLA_SUCCESS) { 1640 QLA_SUCCESS) {
1619 qla_printk(KERN_WARNING, ha, 1641 ql_log(ql_log_warn, vha, 0x7009,
1620 "Unable to update fw options (beacon on).\n"); 1642 "Unable to update fw options (beacon on).\n");
1621 return QLA_FUNCTION_FAILED; 1643 return QLA_FUNCTION_FAILED;
1622 } 1644 }
@@ -1670,14 +1692,14 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
1670 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 1692 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
1671 1693
1672 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1694 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1673 qla_printk(KERN_WARNING, ha, 1695 ql_log(ql_log_warn, vha, 0x704d,
1674 "Unable to update fw options (beacon off).\n"); 1696 "Unable to update fw options (beacon on).\n");
1675 return QLA_FUNCTION_FAILED; 1697 return QLA_FUNCTION_FAILED;
1676 } 1698 }
1677 1699
1678 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1700 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1679 qla_printk(KERN_WARNING, ha, 1701 ql_log(ql_log_warn, vha, 0x704e,
1680 "Unable to get fw options (beacon off).\n"); 1702 "Unable to update fw options (beacon on).\n");
1681 return QLA_FUNCTION_FAILED; 1703 return QLA_FUNCTION_FAILED;
1682 } 1704 }
1683 1705
@@ -2389,10 +2411,9 @@ try_fast:
2389 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2411 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
2390 &optrom_dma, GFP_KERNEL); 2412 &optrom_dma, GFP_KERNEL);
2391 if (!optrom) { 2413 if (!optrom) {
2392 qla_printk(KERN_DEBUG, ha, 2414 ql_log(ql_log_warn, vha, 0x00cc,
2393 "Unable to allocate memory for optrom burst read " 2415 "Unable to allocate memory for optrom burst read (%x KB).\n",
2394 "(%x KB).\n", OPTROM_BURST_SIZE / 1024); 2416 OPTROM_BURST_SIZE / 1024);
2395
2396 goto slow_read; 2417 goto slow_read;
2397 } 2418 }
2398 2419
@@ -2407,12 +2428,11 @@ try_fast:
2407 rval = qla2x00_dump_ram(vha, optrom_dma, 2428 rval = qla2x00_dump_ram(vha, optrom_dma,
2408 flash_data_addr(ha, faddr), burst); 2429 flash_data_addr(ha, faddr), burst);
2409 if (rval) { 2430 if (rval) {
2410 qla_printk(KERN_WARNING, ha, 2431 ql_log(ql_log_warn, vha, 0x00f5,
2411 "Unable to burst-read optrom segment " 2432 "Unable to burst-read optrom segment (%x/%x/%llx).\n",
2412 "(%x/%x/%llx).\n", rval, 2433 rval, flash_data_addr(ha, faddr),
2413 flash_data_addr(ha, faddr),
2414 (unsigned long long)optrom_dma); 2434 (unsigned long long)optrom_dma);
2415 qla_printk(KERN_WARNING, ha, 2435 ql_log(ql_log_warn, vha, 0x00f6,
2416 "Reverting to slow-read.\n"); 2436 "Reverting to slow-read.\n");
2417 2437
2418 dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2438 dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
@@ -2556,8 +2576,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2556 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || 2576 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
2557 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { 2577 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
2558 /* No signature */ 2578 /* No signature */
2559 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " 2579 ql_log(ql_log_fatal, vha, 0x0050,
2560 "signature.\n")); 2580 "No matching ROM signature.\n");
2561 ret = QLA_FUNCTION_FAILED; 2581 ret = QLA_FUNCTION_FAILED;
2562 break; 2582 break;
2563 } 2583 }
@@ -2573,8 +2593,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2573 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || 2593 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
2574 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { 2594 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
2575 /* Incorrect header. */ 2595 /* Incorrect header. */
2576 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " 2596 ql_log(ql_log_fatal, vha, 0x0051,
2577 "found pcir_adr=%x.\n", pcids)); 2597 "PCI data struct not found pcir_adr=%x.\n", pcids);
2578 ret = QLA_FUNCTION_FAILED; 2598 ret = QLA_FUNCTION_FAILED;
2579 break; 2599 break;
2580 } 2600 }
@@ -2588,8 +2608,9 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2588 qla2x00_read_flash_byte(ha, pcids + 0x12); 2608 qla2x00_read_flash_byte(ha, pcids + 0x12);
2589 ha->bios_revision[1] = 2609 ha->bios_revision[1] =
2590 qla2x00_read_flash_byte(ha, pcids + 0x13); 2610 qla2x00_read_flash_byte(ha, pcids + 0x13);
2591 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", 2611 ql_dbg(ql_dbg_init, vha, 0x0052,
2592 ha->bios_revision[1], ha->bios_revision[0])); 2612 "Read BIOS %d.%d.\n",
2613 ha->bios_revision[1], ha->bios_revision[0]);
2593 break; 2614 break;
2594 case ROM_CODE_TYPE_FCODE: 2615 case ROM_CODE_TYPE_FCODE:
2595 /* Open Firmware standard for PCI (FCode). */ 2616 /* Open Firmware standard for PCI (FCode). */
@@ -2602,12 +2623,14 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2602 qla2x00_read_flash_byte(ha, pcids + 0x12); 2623 qla2x00_read_flash_byte(ha, pcids + 0x12);
2603 ha->efi_revision[1] = 2624 ha->efi_revision[1] =
2604 qla2x00_read_flash_byte(ha, pcids + 0x13); 2625 qla2x00_read_flash_byte(ha, pcids + 0x13);
2605 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", 2626 ql_dbg(ql_dbg_init, vha, 0x0053,
2606 ha->efi_revision[1], ha->efi_revision[0])); 2627 "Read EFI %d.%d.\n",
2628 ha->efi_revision[1], ha->efi_revision[0]);
2607 break; 2629 break;
2608 default: 2630 default:
2609 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " 2631 ql_log(ql_log_warn, vha, 0x0054,
2610 "type %x at pcids %x.\n", code_type, pcids)); 2632 "Unrecognized code type %x at pcids %x.\n",
2633 code_type, pcids);
2611 break; 2634 break;
2612 } 2635 }
2613 2636
@@ -2627,21 +2650,28 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2627 2650
2628 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, 2651 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2629 8); 2652 8);
2630 DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from " 2653 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
2631 "flash:\n")); 2654 "Dumping fw "
2632 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8)); 2655 "ver from flash:.\n");
2656 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
2657 (uint8_t *)dbyte, 8);
2633 2658
2634 if ((dcode[0] == 0xffff && dcode[1] == 0xffff && 2659 if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
2635 dcode[2] == 0xffff && dcode[3] == 0xffff) || 2660 dcode[2] == 0xffff && dcode[3] == 0xffff) ||
2636 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2661 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2637 dcode[3] == 0)) { 2662 dcode[3] == 0)) {
2638 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " 2663 ql_log(ql_log_warn, vha, 0x0057,
2639 "revision at %x.\n", ha->flt_region_fw * 4)); 2664 "Unrecognized fw revision at %x.\n",
2665 ha->flt_region_fw * 4);
2640 } else { 2666 } else {
2641 /* values are in big endian */ 2667 /* values are in big endian */
2642 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2668 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
2643 ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3]; 2669 ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
2644 ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5]; 2670 ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
2671 ql_dbg(ql_dbg_init, vha, 0x0058,
2672 "FW Version: "
2673 "%d.%d.%d.\n", ha->fw_revision[0],
2674 ha->fw_revision[1], ha->fw_revision[2]);
2645 } 2675 }
2646 } 2676 }
2647 2677
@@ -2683,8 +2713,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2683 bcode = mbuf + (pcihdr % 4); 2713 bcode = mbuf + (pcihdr % 4);
2684 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { 2714 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
2685 /* No signature */ 2715 /* No signature */
2686 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " 2716 ql_log(ql_log_fatal, vha, 0x0059,
2687 "signature.\n")); 2717 "No matching ROM signature.\n");
2688 ret = QLA_FUNCTION_FAILED; 2718 ret = QLA_FUNCTION_FAILED;
2689 break; 2719 break;
2690 } 2720 }
@@ -2699,8 +2729,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2699 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || 2729 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
2700 bcode[0x2] != 'I' || bcode[0x3] != 'R') { 2730 bcode[0x2] != 'I' || bcode[0x3] != 'R') {
2701 /* Incorrect header. */ 2731 /* Incorrect header. */
2702 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " 2732 ql_log(ql_log_fatal, vha, 0x005a,
2703 "found pcir_adr=%x.\n", pcids)); 2733 "PCI data struct not found pcir_adr=%x.\n", pcids);
2704 ret = QLA_FUNCTION_FAILED; 2734 ret = QLA_FUNCTION_FAILED;
2705 break; 2735 break;
2706 } 2736 }
@@ -2712,26 +2742,30 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2712 /* Intel x86, PC-AT compatible. */ 2742 /* Intel x86, PC-AT compatible. */
2713 ha->bios_revision[0] = bcode[0x12]; 2743 ha->bios_revision[0] = bcode[0x12];
2714 ha->bios_revision[1] = bcode[0x13]; 2744 ha->bios_revision[1] = bcode[0x13];
2715 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", 2745 ql_dbg(ql_dbg_init, vha, 0x005b,
2716 ha->bios_revision[1], ha->bios_revision[0])); 2746 "Read BIOS %d.%d.\n",
2747 ha->bios_revision[1], ha->bios_revision[0]);
2717 break; 2748 break;
2718 case ROM_CODE_TYPE_FCODE: 2749 case ROM_CODE_TYPE_FCODE:
2719 /* Open Firmware standard for PCI (FCode). */ 2750 /* Open Firmware standard for PCI (FCode). */
2720 ha->fcode_revision[0] = bcode[0x12]; 2751 ha->fcode_revision[0] = bcode[0x12];
2721 ha->fcode_revision[1] = bcode[0x13]; 2752 ha->fcode_revision[1] = bcode[0x13];
2722 DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n", 2753 ql_dbg(ql_dbg_init, vha, 0x005c,
2723 ha->fcode_revision[1], ha->fcode_revision[0])); 2754 "Read FCODE %d.%d.\n",
2755 ha->fcode_revision[1], ha->fcode_revision[0]);
2724 break; 2756 break;
2725 case ROM_CODE_TYPE_EFI: 2757 case ROM_CODE_TYPE_EFI:
2726 /* Extensible Firmware Interface (EFI). */ 2758 /* Extensible Firmware Interface (EFI). */
2727 ha->efi_revision[0] = bcode[0x12]; 2759 ha->efi_revision[0] = bcode[0x12];
2728 ha->efi_revision[1] = bcode[0x13]; 2760 ha->efi_revision[1] = bcode[0x13];
2729 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", 2761 ql_dbg(ql_dbg_init, vha, 0x005d,
2730 ha->efi_revision[1], ha->efi_revision[0])); 2762 "Read EFI %d.%d.\n",
2763 ha->efi_revision[1], ha->efi_revision[0]);
2731 break; 2764 break;
2732 default: 2765 default:
2733 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " 2766 ql_log(ql_log_warn, vha, 0x005e,
2734 "type %x at pcids %x.\n", code_type, pcids)); 2767 "Unrecognized code type %x at pcids %x.\n",
2768 code_type, pcids);
2735 break; 2769 break;
2736 } 2770 }
2737 2771
@@ -2753,13 +2787,18 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2753 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 2787 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
2754 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2788 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2755 dcode[3] == 0)) { 2789 dcode[3] == 0)) {
2756 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " 2790 ql_log(ql_log_warn, vha, 0x005f,
2757 "revision at %x.\n", ha->flt_region_fw * 4)); 2791 "Unrecognized fw revision at %x.\n",
2792 ha->flt_region_fw * 4);
2758 } else { 2793 } else {
2759 ha->fw_revision[0] = dcode[0]; 2794 ha->fw_revision[0] = dcode[0];
2760 ha->fw_revision[1] = dcode[1]; 2795 ha->fw_revision[1] = dcode[1];
2761 ha->fw_revision[2] = dcode[2]; 2796 ha->fw_revision[2] = dcode[2];
2762 ha->fw_revision[3] = dcode[3]; 2797 ha->fw_revision[3] = dcode[3];
2798 ql_dbg(ql_dbg_init, vha, 0x0060,
2799 "Firmware revision %d.%d.%d.%d.\n",
2800 ha->fw_revision[0], ha->fw_revision[1],
2801 ha->fw_revision[2], ha->fw_revision[3]);
2763 } 2802 }
2764 2803
2765 /* Check for golden firmware and get version if available */ 2804 /* Check for golden firmware and get version if available */
@@ -2775,9 +2814,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2775 2814
2776 if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF && 2815 if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
2777 dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) { 2816 dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
2778 DEBUG2(qla_printk(KERN_INFO, ha, 2817 ql_log(ql_log_warn, vha, 0x0056,
2779 "%s(%ld): Unrecognized golden fw at 0x%x.\n", 2818 "Unrecognized golden fw at 0x%x.\n",
2780 __func__, vha->host_no, ha->flt_region_gold_fw * 4)); 2819 ha->flt_region_gold_fw * 4);
2781 return ret; 2820 return ret;
2782 } 2821 }
2783 2822
@@ -2843,9 +2882,9 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2843 if (!ha->fcp_prio_cfg) { 2882 if (!ha->fcp_prio_cfg) {
2844 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 2883 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
2845 if (!ha->fcp_prio_cfg) { 2884 if (!ha->fcp_prio_cfg) {
2846 qla_printk(KERN_WARNING, ha, 2885 ql_log(ql_log_warn, vha, 0x00d5,
2847 "Unable to allocate memory for fcp priority data " 2886 "Unable to allocate memory for fcp priorty data (%x).\n",
2848 "(%x).\n", FCP_PRIO_CFG_SIZE); 2887 FCP_PRIO_CFG_SIZE);
2849 return QLA_FUNCTION_FAILED; 2888 return QLA_FUNCTION_FAILED;
2850 } 2889 }
2851 } 2890 }
@@ -2857,7 +2896,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2857 ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg, 2896 ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
2858 fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE); 2897 fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
2859 2898
2860 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0)) 2899 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
2861 goto fail; 2900 goto fail;
2862 2901
2863 /* read remaining FCP CMD config data from flash */ 2902 /* read remaining FCP CMD config data from flash */
@@ -2869,7 +2908,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2869 fcp_prio_addr << 2, (len < max_len ? len : max_len)); 2908 fcp_prio_addr << 2, (len < max_len ? len : max_len));
2870 2909
2871 /* revalidate the entire FCP priority config data, including entries */ 2910 /* revalidate the entire FCP priority config data, including entries */
2872 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1)) 2911 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
2873 goto fail; 2912 goto fail;
2874 2913
2875 ha->flags.fcp_prio_enabled = 1; 2914 ha->flags.fcp_prio_enabled = 1;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 28d9c9d6b4b4..fc3f168decb4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -137,6 +137,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
137 host->host_blocked = host->max_host_blocked; 137 host->host_blocked = host->max_host_blocked;
138 break; 138 break;
139 case SCSI_MLQUEUE_DEVICE_BUSY: 139 case SCSI_MLQUEUE_DEVICE_BUSY:
140 case SCSI_MLQUEUE_EH_RETRY:
140 device->device_blocked = device->max_device_blocked; 141 device->device_blocked = device->max_device_blocked;
141 break; 142 break;
142 case SCSI_MLQUEUE_TARGET_BUSY: 143 case SCSI_MLQUEUE_TARGET_BUSY:
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 8a172d4f4564..5fbeadd96819 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -597,6 +597,28 @@ static DEVICE_ATTR(signalling, S_IRUGO,
597 show_spi_host_signalling, 597 show_spi_host_signalling,
598 store_spi_host_signalling); 598 store_spi_host_signalling);
599 599
600static ssize_t show_spi_host_width(struct device *cdev,
601 struct device_attribute *attr,
602 char *buf)
603{
604 struct Scsi_Host *shost = transport_class_to_shost(cdev);
605
606 return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow");
607}
608static DEVICE_ATTR(host_width, S_IRUGO,
609 show_spi_host_width, NULL);
610
611static ssize_t show_spi_host_hba_id(struct device *cdev,
612 struct device_attribute *attr,
613 char *buf)
614{
615 struct Scsi_Host *shost = transport_class_to_shost(cdev);
616
617 return sprintf(buf, "%d\n", shost->this_id);
618}
619static DEVICE_ATTR(hba_id, S_IRUGO,
620 show_spi_host_hba_id, NULL);
621
600#define DV_SET(x, y) \ 622#define DV_SET(x, y) \
601 if(i->f->set_##x) \ 623 if(i->f->set_##x) \
602 i->f->set_##x(sdev->sdev_target, y) 624 i->f->set_##x(sdev->sdev_target, y)
@@ -1380,6 +1402,8 @@ static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
1380 1402
1381static struct attribute *host_attributes[] = { 1403static struct attribute *host_attributes[] = {
1382 &dev_attr_signalling.attr, 1404 &dev_attr_signalling.attr,
1405 &dev_attr_host_width.attr,
1406 &dev_attr_hba_id.attr,
1383 NULL 1407 NULL
1384}; 1408};
1385 1409