 drivers/scsi/bfa/bfa.h         |  15
 drivers/scsi/bfa/bfa_core.c    |  28
 drivers/scsi/bfa/bfa_hw_cb.c   |  58
 drivers/scsi/bfa/bfa_hw_ct.c   |  18
 drivers/scsi/bfa/bfa_ioc.c     | 130
 drivers/scsi/bfa/bfa_ioc.h     |   3
 drivers/scsi/bfa/bfa_ioc_ct.c  |  53
 drivers/scsi/bfa/bfa_modules.h |   2
 drivers/scsi/bfa/bfi.h         |  14
 drivers/scsi/bfa/bfi_reg.h     |   4
 10 files changed, 188 insertions(+), 137 deletions(-)
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 708bab093c14..8c73265f977d 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -228,7 +228,8 @@ struct bfa_hwif_s {
 	void	(*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
 	void	(*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
 	void	(*hw_msix_init)(struct bfa_s *bfa, int nvecs);
-	void	(*hw_msix_install)(struct bfa_s *bfa);
+	void	(*hw_msix_ctrl_install)(struct bfa_s *bfa);
+	void	(*hw_msix_queue_install)(struct bfa_s *bfa);
 	void	(*hw_msix_uninstall)(struct bfa_s *bfa);
 	void	(*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
 	void	(*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
@@ -271,8 +272,10 @@ struct bfa_iocfc_s {
 			bfa_ioc_portid(&(__bfa)->ioc)
 #define bfa_msix_init(__bfa, __nvecs)					\
 	((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
-#define bfa_msix_install(__bfa)						\
-	((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
+#define bfa_msix_ctrl_install(__bfa)					\
+	((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa))
+#define bfa_msix_queue_install(__bfa)					\
+	((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
 #define bfa_msix_uninstall(__bfa)					\
 	((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
 #define bfa_isr_mode_set(__bfa, __msix) do {				\
@@ -314,7 +317,8 @@ void bfa_hwcb_reginit(struct bfa_s *bfa);
 void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwcb_msix_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
 void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
 void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
 void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -326,7 +330,8 @@ void bfa_hwct2_reginit(struct bfa_s *bfa);
 void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwct_msix_install(struct bfa_s *bfa);
+void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
 void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
 void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
 void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
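
Note on the bfa.h change above: the single hw_msix_install hook is split into hw_msix_ctrl_install and hw_msix_queue_install, so the control/error vector can be wired up when interrupts are enabled (bfa_isr_enable) while the per-queue vectors are installed only once the firmware queue configuration is known (bfa_iocfc_cfgrsp). The following is a minimal, self-contained sketch of that two-stage install pattern; the names (hwif_ops, dev, nqueues) are illustrative stand-ins, not driver code.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

struct dev;

struct hwif_ops {
	void (*msix_ctrl_install)(struct dev *d);	/* error/ctrl vector */
	void (*msix_queue_install)(struct dev *d);	/* CPE/RME queue vectors */
};

struct dev {
	struct hwif_ops	ops;
	int		nqueues;	/* known only after the cfg response */
};

static void ctrl_install(struct dev *d)
{
	(void)d;
	printf("install ctrl/err vector handler\n");
}

static void queue_install(struct dev *d)
{
	int q;

	for (q = 0; q < d->nqueues; q++)
		printf("install handler for queue %d\n", q);
}

int main(void)
{
	struct dev d = { { ctrl_install, queue_install }, 0 };

	d.ops.msix_ctrl_install(&d);	/* at ISR-enable time */
	d.nqueues = 4;			/* firmware reports its queues */
	d.ops.msix_queue_install(&d);	/* at cfg-response time */
	return 0;
}
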
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 4c9e83803cb6..0dbdd2da5b48 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -248,7 +248,7 @@ bfa_intx(struct bfa_s *bfa)
 	writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
 	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-		if (intr & (__HFN_INT_RME_Q0 << queue))
+		if ((intr & (__HFN_INT_RME_Q0 << queue)) && bfa->queue_process)
 			bfa_isr_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
 	}
 	intr &= ~qintr;
@@ -262,7 +262,7 @@ bfa_intx(struct bfa_s *bfa)
 	writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
 	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-		if (intr & (__HFN_INT_CPE_Q0 << queue))
+		if ((intr & (__HFN_INT_CPE_Q0 << queue)) && bfa->queue_process)
 			bfa_isr_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
 	}
 	intr &= ~qintr;
@@ -282,7 +282,7 @@ bfa_isr_enable(struct bfa_s *bfa)
 
 	bfa_trc(bfa, pci_func);
 
-	bfa_msix_install(bfa);
+	bfa_msix_ctrl_install(bfa);
 
 	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
 		umsk = __HFN_INT_ERR_MASK_CT2;
@@ -326,9 +326,6 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
 void
 bfa_msix_rspq(struct bfa_s *bfa, int vec)
 {
-	if (!bfa->rme_process)
-		return;
-
 	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
 }
 
@@ -512,7 +509,8 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
 		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
 		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
-		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
+		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
+		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
 		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
 		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
 		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
@@ -524,7 +522,8 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
 		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
 		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
-		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
+		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
+		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
 		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
 		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
 		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
@@ -640,7 +639,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
 {
 	int		i;
 
-	bfa->rme_process = BFA_TRUE;
+	bfa->queue_process = BFA_TRUE;
 	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
 		bfa->iocfc.hwif.hw_rspq_ack(bfa, i);
 
@@ -743,6 +742,11 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
 	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
 
 	/*
+	 * Install MSIX queue handlers
+	 */
+	bfa_msix_queue_install(bfa);
+
+	/*
 	 * Configuration is complete - initialize/start submodules
 	 */
 	bfa_fcport_init(bfa);
@@ -813,7 +817,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
 {
 	struct bfa_s	*bfa = bfa_arg;
 
-	bfa->rme_process = BFA_FALSE;
+	bfa->queue_process = BFA_FALSE;
 
 	bfa_isr_disable(bfa);
 	bfa_iocfc_disable_submod(bfa);
@@ -917,7 +921,7 @@ bfa_iocfc_stop(struct bfa_s *bfa)
 {
 	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
 
-	bfa->rme_process = BFA_FALSE;
+	bfa->queue_process = BFA_FALSE;
 	bfa_ioc_disable(&bfa->ioc);
 }
 
@@ -1017,7 +1021,7 @@ bfa_iocfc_disable(struct bfa_s *bfa)
 		"IOC Disable");
 	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
 
-	bfa->rme_process = BFA_FALSE;
+	bfa->queue_process = BFA_FALSE;
 	bfa_ioc_disable(&bfa->ioc);
 }
 
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index 4ef3cf2e7d98..15fbb13df96c 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -86,43 +86,71 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
 }
 
 /*
+ * Dummy interrupt handler for handling spurious interrupts.
+ */
+static void
+bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
+{
+}
+
+/*
  * No special setup required for crossbow -- vector assignments are implicit.
  */
 void
 bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
 {
-	int i;
-
 	WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
 
 	bfa->msix.nvecs = nvecs;
-	if (nvecs == 1) {
-		for (i = 0; i < BFI_MSIX_CB_MAX; i++)
+	bfa_hwcb_msix_uninstall(bfa);
+}
+
+void
+bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
+{
+	int i;
+
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1) {
+		for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
 			bfa->msix.handler[i] = bfa_msix_all;
 		return;
 	}
 
-	for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
-		bfa->msix.handler[i] = bfa_msix_reqq;
-
-	for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
-		bfa->msix.handler[i] = bfa_msix_rspq;
-
-	for (; i < BFI_MSIX_CB_MAX; i++)
+	for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
 		bfa->msix.handler[i] = bfa_msix_lpu_err;
 }
 
-/*
- * Crossbow -- dummy, interrupts are masked
- */
 void
-bfa_hwcb_msix_install(struct bfa_s *bfa)
+bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
 {
+	int i;
+
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1) {
+		for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+			bfa->msix.handler[i] = bfa_msix_all;
+		return;
+	}
+
+	for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
+		bfa->msix.handler[i] = bfa_msix_reqq;
+
+	for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+		bfa->msix.handler[i] = bfa_msix_rspq;
 }
 
 void
 bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
 {
+	int i;
+
+	for (i = 0; i < BFI_MSIX_CB_MAX; i++)
+		bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
 }
 
 /*
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 66e2d29ff45a..989bbce9b296 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -96,7 +96,19 @@ bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
 }
 
 void
-bfa_hwct_msix_install(struct bfa_s *bfa)
+bfa_hwct_msix_ctrl_install(struct bfa_s *bfa)
+{
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1)
+		bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;
+	else
+		bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
+}
+
+void
+bfa_hwct_msix_queue_install(struct bfa_s *bfa)
 {
 	int i;
 
@@ -104,7 +116,7 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
 		return;
 
 	if (bfa->msix.nvecs == 1) {
-		for (i = 0; i < BFI_MSIX_CT_MAX; i++)
+		for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++)
 			bfa->msix.handler[i] = bfa_msix_all;
 		return;
 	}
@@ -114,8 +126,6 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
 
 	for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++)
 		bfa->msix.handler[i] = bfa_msix_rspq;
-
-	bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
 }
 
 void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 3d336c279c42..9c6e493cb9c7 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -29,8 +29,8 @@ BFA_TRC_FILE(CNA, IOC);
 #define BFA_IOC_TOV		3000	/* msecs */
 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
 #define BFA_IOC_HB_TOV		500	/* msecs */
-#define BFA_IOC_HWINIT_MAX	5
 #define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
+#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ
 
 #define bfa_ioc_timer_start(__ioc)					\
 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
@@ -79,6 +79,7 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE;
 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
 static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
@@ -107,11 +108,10 @@ enum ioc_event {
 	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
 	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
 	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
-	IOC_E_INITFAILED	= 8,	/* failure notice by iocpf sm	*/
-	IOC_E_PFFAILED		= 9,	/* failure notice by iocpf sm	*/
-	IOC_E_HBFAIL		= 10,	/* heartbeat failure		*/
-	IOC_E_HWERROR		= 11,	/* hardware error interrupt	*/
-	IOC_E_TIMEOUT		= 12,	/* timeout			*/
+	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
+	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
+	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
+	IOC_E_TIMEOUT		= 11,	/* timeout			*/
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -145,9 +145,9 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
 		bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
 #define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
 
-#define bfa_iocpf_recovery_timer_start(__ioc)				\
+#define bfa_iocpf_poll_timer_start(__ioc)				\
 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
-		bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
+		bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
 
 #define bfa_sem_timer_start(__ioc)					\
 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
@@ -159,6 +159,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
  */
 static void bfa_iocpf_timeout(void *ioc_arg);
 static void bfa_iocpf_sem_timeout(void *ioc_arg);
+static void bfa_iocpf_poll_timeout(void *ioc_arg);
 
 /*
  * IOCPF state machine events
@@ -316,7 +317,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
 		/* !!! fall through !!! */
 	case IOC_E_HWERROR:
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 		if (event != IOC_E_PFFAILED)
 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 		break;
@@ -368,7 +369,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
 		/* !!! fall through !!! */
 	case IOC_E_TIMEOUT:
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 		if (event != IOC_E_PFFAILED)
 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
 		break;
@@ -417,13 +418,13 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
 		bfa_hb_timer_stop(ioc);
 		/* !!! fall through !!! */
 	case IOC_E_HBFAIL:
-		bfa_ioc_fail_notify(ioc);
-
 		if (ioc->iocpf.auto_recover)
 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
 		else
 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 
+		bfa_ioc_fail_notify(ioc);
+
 		if (event != IOC_E_PFFAILED)
 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
 		break;
@@ -528,14 +529,11 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
 		 * Initialization retry failed.
 		 */
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 		if (event != IOC_E_PFFAILED)
 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 		break;
 
-	case IOC_E_INITFAILED:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
-		break;
-
 	case IOC_E_ENABLE:
 		break;
 
@@ -603,7 +601,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 static void
 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
 {
-	iocpf->retry_count = 0;
+	iocpf->fw_mismatch_notified = BFA_FALSE;
 	iocpf->auto_recover = bfa_auto_recover;
 }
 
@@ -653,7 +651,6 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	case IOCPF_E_SEMLOCKED:
 		if (bfa_ioc_firmware_lock(ioc)) {
 			if (bfa_ioc_sync_start(ioc)) {
-				iocpf->retry_count = 0;
 				bfa_ioc_sync_join(ioc);
 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
 			} else {
@@ -692,10 +689,10 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
 	/*
 	 * Call only the first time sm enters fwmismatch state.
	 */
-	if (iocpf->retry_count == 0)
+	if (iocpf->fw_mismatch_notified == BFA_FALSE)
 		bfa_ioc_pf_fwmismatch(iocpf->ioc);
 
-	iocpf->retry_count++;
+	iocpf->fw_mismatch_notified = BFA_TRUE;
 	bfa_iocpf_timer_start(iocpf->ioc);
 }
 
@@ -773,7 +770,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
 {
-	bfa_iocpf_timer_start(iocpf->ioc);
+	iocpf->poll_time = 0;
 	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
 }
 
@@ -790,20 +787,12 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_FWREADY:
-		bfa_iocpf_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
 		break;
 
-	case IOCPF_E_INITFAIL:
-		bfa_iocpf_timer_stop(ioc);
-		/*
-		 * !!! fall through !!!
-		 */
-
 	case IOCPF_E_TIMEOUT:
 		writel(1, ioc->ioc_regs.ioc_sem_reg);
-		if (event == IOCPF_E_TIMEOUT)
-			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 		break;
 
@@ -823,6 +812,10 @@ static void
 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
 {
 	bfa_iocpf_timer_start(iocpf->ioc);
+	/*
+	 * Enable Interrupts before sending fw IOC ENABLE cmd.
+	 */
+	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
 	bfa_ioc_send_enable(iocpf->ioc);
 }
 
@@ -863,10 +856,6 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
 		break;
 
-	case IOCPF_E_FWREADY:
-		bfa_ioc_send_enable(ioc);
-		break;
-
 	default:
 		bfa_sm_fault(ioc, event);
 	}
@@ -898,16 +887,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
 		break;
 
-	case IOCPF_E_FWREADY:
-		if (bfa_ioc_is_operational(ioc)) {
-			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
-		} else {
-			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
-		}
-		break;
-
 	default:
 		bfa_sm_fault(ioc, event);
 	}
@@ -932,7 +911,6 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_FWRSP_DISABLE:
-	case IOCPF_E_FWREADY:
 		bfa_iocpf_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
@@ -1005,7 +983,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_ENABLE:
-		iocpf->retry_count = 0;
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
 		break;
 
@@ -1038,20 +1015,10 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
 		bfa_ioc_notify_fail(ioc);
-		bfa_ioc_sync_ack(ioc);
-		iocpf->retry_count++;
-		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
-			bfa_ioc_sync_leave(ioc);
-			writel(1, ioc->ioc_regs.ioc_sem_reg);
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-		} else {
-			if (bfa_ioc_sync_complete(ioc))
-				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
-			else {
-				writel(1, ioc->ioc_regs.ioc_sem_reg);
-				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-			}
-		}
+		bfa_ioc_sync_leave(ioc);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
 		break;
 
 	case IOCPF_E_DISABLE:
@@ -1076,7 +1043,6 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
 {
-	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
 }
 
 /*
@@ -1129,11 +1095,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
-		iocpf->retry_count = 0;
 		bfa_ioc_sync_ack(ioc);
 		bfa_ioc_notify_fail(ioc);
 		if (!iocpf->auto_recover) {
 			bfa_ioc_sync_leave(ioc);
+			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
 			writel(1, ioc->ioc_regs.ioc_sem_reg);
 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 		} else {
@@ -1441,7 +1407,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 	 * just wait for an initialization completion interrupt.
 	 */
 	if (ioc_fwstate == BFI_IOC_INITING) {
-		ioc->cbfn->reset_cbfn(ioc->bfa);
+		bfa_ioc_poll_fwinit(ioc);
 		return;
 	}
 
@@ -1460,7 +1426,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 		 * be flushed. Otherwise MSI-X interrupts are not delivered.
 		 */
 		bfa_ioc_msgflush(ioc);
-		ioc->cbfn->reset_cbfn(ioc->bfa);
 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
 		return;
 	}
@@ -1902,11 +1867,6 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 
 	bfa_ioc_msgflush(ioc);
 	bfa_ioc_download_fw(ioc, boot_type, boot_env);
-
-	/*
-	 * Enable interrupts just before starting LPU
-	 */
-	ioc->cbfn->reset_cbfn(ioc->bfa);
 	bfa_ioc_lpu_start(ioc);
 }
 
@@ -1981,10 +1941,6 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
 	case BFI_IOC_I2H_HBEAT:
 		break;
 
-	case BFI_IOC_I2H_READY_EVENT:
-		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
-		break;
-
 	case BFI_IOC_I2H_ENABLE_REPLY:
 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
 		break;
@@ -2752,6 +2708,34 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
 	bfa_ioc_hw_sem_get(ioc);
 }
 
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
+{
+	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	bfa_trc(ioc, fwstate);
+
+	if (fwstate == BFI_IOC_DISABLED) {
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+		return;
+	}
+
+	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
+		bfa_iocpf_timeout(ioc);
+	else {
+		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+		bfa_iocpf_poll_timer_start(ioc);
+	}
+}
+
+static void
+bfa_iocpf_poll_timeout(void *ioc_arg)
+{
+	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+	bfa_ioc_poll_fwinit(ioc);
+}
+
 /*
  * bfa timer function
  */
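
The bfa_ioc.c changes above drop the BFI_IOC_I2H_READY_EVENT mailbox path (see the bfi.h hunk further down) in favor of polling the IOC firmware state: bfa_ioc_poll_fwinit() re-arms a BFA_IOC_POLL_TOV tick via bfa_iocpf_poll_timer_start() and accumulates iocpf.poll_time until BFA_IOC_TOV expires. Below is a minimal, self-contained sketch of that poll-with-budget pattern; the names and millisecond values are illustrative assumptions, not taken from the driver.

/* Illustrative sketch only -- not part of the patch. */
#include <stdbool.h>
#include <stdio.h>

#define POLL_TOV   200	/* ms per poll tick (stand-in for BFA_IOC_POLL_TOV) */
#define TOTAL_TOV 3000	/* overall budget   (stand-in for BFA_IOC_TOV)      */

struct poller {
	unsigned int poll_time;		/* ms spent polling so far */
	bool (*ready)(void);		/* condition being polled  */
};

/* One timer tick: returns true if the poll timer should be re-armed. */
static bool poll_tick(struct poller *p)
{
	if (p->ready()) {
		printf("ready after %u ms\n", p->poll_time);
		return false;		/* done: report readiness */
	}
	if (p->poll_time >= TOTAL_TOV) {
		printf("timed out\n");
		return false;		/* done: report timeout */
	}
	p->poll_time += POLL_TOV;
	return true;			/* try again on the next tick */
}

static unsigned int ticks;
static bool fw_ready(void)
{
	return ++ticks > 5;		/* pretend firmware finishes on tick 6 */
}

int main(void)
{
	struct poller p = { 0, fw_ready };

	while (poll_tick(&p))
		;	/* in the driver this is a timer callback, not a loop */
	return 0;
}
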
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 39382ea21449..8c9bbdf26482 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -224,8 +224,9 @@ struct bfa_ioc_notify_s {
 struct bfa_iocpf_s {
 	bfa_fsm_t		fsm;
 	struct bfa_ioc_s	*ioc;
-	u32			retry_count;
+	bfa_boolean_t		fw_mismatch_notified;
 	bfa_boolean_t		auto_recover;
+	u32			poll_time;
 };
 
 struct bfa_ioc_s {
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 118ed8c7fc2e..6710a8016601 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -300,8 +300,8 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
 	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
 	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
-	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
-	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
+	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
+	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
 
 	/*
 	 * sram memory access
@@ -636,10 +636,10 @@ bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 static struct { u32 sclk, speed, half_speed; } ct2_pll[] = {
 	{0},	/* unused */
 	{__APP_PLL_SCLK_CLK_DIV2, 0, 0},	/* FC 8G */
-	{0, __APP_LPU_SPEED, 0},		/* FC 16G */
+	{0, 0, 0},				/* FC 16G */
 	{__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2, 0,	/* ETH */
 		__APP_LPUCLK_HALFSPEED},
-	{0, __APP_LPU_SPEED, 0},		/* COMBO */
+	{0, 0, 0},				/* COMBO */
 };
 
 static void
@@ -664,15 +664,13 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb, enum bfi_asic_mode mode)
 	writel(r32 | ct2_pll[mode].sclk, (rb + CT2_APP_PLL_SCLK_CTL_REG));
 
 	/*
-	 * remove clock gating for ethernet subsystem for ethernet mode
+	 * while doing PLL init dont clock gate ethernet subsystem
 	 */
-	if (mode == BFI_ASIC_MODE_ETH) {
-		r32 = readl((rb + CT2_CHIP_MISC_PRG));
-		writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
+	r32 = readl((rb + CT2_CHIP_MISC_PRG));
+	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
 
-		r32 = readl((rb + CT2_PCIE_MISC_REG));
-		writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
-	}
+	r32 = readl((rb + CT2_PCIE_MISC_REG));
+	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
 
 	/*
 	 * set sclk value
@@ -693,6 +691,19 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb, enum bfi_asic_mode mode)
 	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
 	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
 		(rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+	/*
+	 * clock gating for ethernet subsystem if not in ethernet mode
+	 */
+	if (mode != BFI_ASIC_MODE_ETH) {
+		r32 = readl((rb + CT2_CHIP_MISC_PRG));
+		writel(r32 & ~__ETH_CLK_ENABLE_PORT0,
+			(rb + CT2_CHIP_MISC_PRG));
+
+		r32 = readl((rb + CT2_PCIE_MISC_REG));
+		writel(r32 & ~__ETH_CLK_ENABLE_PORT1,
+			(rb + CT2_PCIE_MISC_REG));
+	}
 }
 
 static void
@@ -728,7 +739,8 @@ bfa_ioc_ct2_lclk_init(void __iomem *rb, enum bfi_asic_mode mode)
 	 */
 	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
 	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
-	if (mode == BFI_ASIC_MODE_FC || mode == BFI_ASIC_MODE_ETH)
+	if (mode == BFI_ASIC_MODE_FC || mode == BFI_ASIC_MODE_FC16 ||
+	    mode == BFI_ASIC_MODE_ETH)
 		r32 |= 0x20c1731b;
 	else
 		r32 |= 0x2081731b;
@@ -755,8 +767,10 @@ bfa_ioc_ct2_mem_init(void __iomem *rb, enum bfi_asic_mode mode)
 
 	fcmode = (mode == BFI_ASIC_MODE_FC) || (mode == BFI_ASIC_MODE_FC16);
 	if (!fcmode) {
-		writel(__PMM_1T_RESET_P, (rb + CT2_PMM_1T_CONTROL_REG_P0));
-		writel(__PMM_1T_RESET_P, (rb + CT2_PMM_1T_CONTROL_REG_P1));
+		writel(__PMM_1T_PNDB_P | __PMM_1T_RESET_P,
+			(rb + CT2_PMM_1T_CONTROL_REG_P0));
+		writel(__PMM_1T_PNDB_P | __PMM_1T_RESET_P,
+			(rb + CT2_PMM_1T_CONTROL_REG_P1));
 	}
 
 	r32 = readl((rb + PSS_CTL_REG));
@@ -764,6 +778,11 @@ bfa_ioc_ct2_mem_init(void __iomem *rb, enum bfi_asic_mode mode)
 	writel(r32, (rb + PSS_CTL_REG));
 	udelay(1000);
 
+	if (!fcmode) {
+		writel(__PMM_1T_PNDB_P, (rb + CT2_PMM_1T_CONTROL_REG_P0));
+		writel(__PMM_1T_PNDB_P, (rb + CT2_PMM_1T_CONTROL_REG_P1));
+	}
+
 	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
 	udelay(1000);
 	writel(0, (rb + CT2_MBIST_CTL_REG));
@@ -776,6 +795,12 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 	bfa_ioc_ct2_lclk_init(rb, mode);
 	bfa_ioc_ct2_mem_init(rb, mode);
 
+	/*
+	 * Disable flash presence to NFC by clearing GPIO 0
+	 */
+	writel(0, (rb + PSS_GPIO_OUT_REG));
+	writel(1, (rb + PSS_GPIO_OE_REG));
+
 	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
 	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
 	return BFA_STATUS_OK;
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index ab79ff6fdeea..4b67ea2ea407 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -109,7 +109,7 @@ struct bfa_s {
 	struct bfa_timer_mod_s	timer_mod;	/* timer module		*/
 	struct bfa_modules_s	modules;	/* BFA modules		*/
 	struct list_head	comp_q;		/* pending completions	*/
-	bfa_boolean_t		rme_process;	/* RME processing enabled */
+	bfa_boolean_t		queue_process;	/* queue processing enabled */
 	struct list_head	reqq_waitq[BFI_IOC_MAX_CQS];
 	bfa_boolean_t		fcs;		/* FCS is attached to BFA */
 	struct bfa_msix_s	msix;
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 7096c5fd2ba0..e6383f23e065 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -221,8 +221,7 @@ enum bfi_ioc_i2h_msgs {
 	BFI_IOC_I2H_ENABLE_REPLY	= BFA_I2HM(1),
 	BFI_IOC_I2H_DISABLE_REPLY	= BFA_I2HM(2),
 	BFI_IOC_I2H_GETATTR_REPLY	= BFA_I2HM(3),
-	BFI_IOC_I2H_READY_EVENT		= BFA_I2HM(4),
-	BFI_IOC_I2H_HBEAT		= BFA_I2HM(5),
+	BFI_IOC_I2H_HBEAT		= BFA_I2HM(4),
 };
 
 /*
@@ -318,15 +317,6 @@ enum bfi_port_mode {
 	BFI_PORT_MODE_ETH	= 2,
 };
 
-/*
- * BFI_IOC_I2H_READY_EVENT message
- */
-struct bfi_ioc_rdy_event_s {
-	struct bfi_mhdr_s	mh;		/* common msg header */
-	u8			init_status;	/* init event status */
-	u8			rsvd[3];
-};
-
 struct bfi_ioc_hbeat_s {
 	struct bfi_mhdr_s	mh;		/* common msg header */
 	u32			hb_count;	/* current heart beat count */
@@ -418,7 +408,7 @@ union bfi_ioc_h2i_msg_u {
  */
 union bfi_ioc_i2h_msg_u {
 	struct bfi_mhdr_s		mh;
-	struct bfi_ioc_rdy_event_s	rdy_event;
+	struct bfi_ioc_ctrl_reply_s	rdy_event;
 	u32				mboxmsg[BFI_IOC_MSGSZ];
 };
 
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index 0e8b68540018..de4db72b39e9 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -137,6 +137,10 @@
 #define __PSS_LPU0_RESET		0x00000001
 #define PSS_ERR_STATUS_REG		0x00018810	/* cb/ct */
 #define ERR_SET_REG			0x00018818	/* cb/ct */
+#define PSS_GPIO_OUT_REG		0x000188c0	/* cb/ct */
+#define __PSS_GPIO_OUT_REG		0x00000fff
+#define PSS_GPIO_OE_REG			0x000188c8	/* cb/ct */
+#define __PSS_GPIO_OE_REG		0x000000ff
 
 #define HOSTFN0_LPU_MBOX0_0		0x00019200	/* cb/ct */
 #define HOSTFN1_LPU_MBOX0_8		0x00019260	/* cb/ct */