author		Vijaya Mohan Guvva <vmohan@brocade.com>	2013-05-13 05:33:26 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2013-06-26 16:12:19 -0400
commit		c679b599afa5dd38d20e058aa68bc94c1c1416a1 (patch)
tree		605da11cd1b832a1254ba81ba6934482c87065df /drivers/scsi/bfa
parent		f2a0cc3ffd5ee123086b8e76522a85a937d89878 (diff)
[SCSI] bfa: kdump fix on 815 and 825 adapters
Root cause: When the kernel crashes on Brocade 815/825 adapters, the bfa IOC
state machine and the firmware do not get a notification and hence are not
cleanly shut down. As a result, the registers holding driver/IOC state
information are not reset back to valid disabled/parking values, and subsequent
driver initialization fails during kdump kernel boot.

Fix description: During the initialization of the first PCI function, detect an
unclean shutdown by reading the chip registers and reset the corresponding
registers, so that the IOC/firmware gets a clean re-initialization.

Signed-off-by: Vijaya Mohan Guvva <vmohan@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
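To illustrate the detection logic used in the patch below: on the crossbow (CB)
ASIC the firmware state occupies the low 16 bits of the IOC state register and
per-PCI-function "join" bits occupy the high 16 bits; a join bit still set at
driver load means the previous incarnation exited uncleanly, so the state
registers are forced back to BFI_IOC_UNINIT. A minimal sketch follows; the mask
values mirror the patch, but the helper name and the plain-pointer register
access are hypothetical stand-ins for the driver's readl()/writel() on
ioc_fwstate/alt_ioc_fwstate.

/*
 * Sketch only: cb_unclean_shutdown_reset() is not a driver function;
 * the masks mirror BFA_IOC_CB_JOIN_MASK / BFA_IOC_CB_JOIN_SH below.
 */
#include <stdbool.h>
#include <stdint.h>

#define BFA_IOC_CB_JOIN_SH	16
#define BFA_IOC_CB_JOIN_MASK	0xffff0000	/* per-PCI-fn join bits */
#define BFI_IOC_UNINIT		0		/* clean parking state */

static bool cb_unclean_shutdown_reset(uint32_t *ioc_fwstate,
				      uint32_t *alt_ioc_fwstate)
{
	/* Join bits survive a crash; a clean unload clears them. */
	if (*ioc_fwstate & BFA_IOC_CB_JOIN_MASK) {
		*ioc_fwstate = BFI_IOC_UNINIT;	/* stands in for writel() */
		*alt_ioc_fwstate = BFI_IOC_UNINIT;
		return true;	/* previous incarnation exited uncleanly */
	}
	return false;
}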
Diffstat (limited to 'drivers/scsi/bfa')
-rw-r--r--	drivers/scsi/bfa/bfa_ioc.c	42
-rw-r--r--	drivers/scsi/bfa/bfa_ioc.h	6
-rw-r--r--	drivers/scsi/bfa/bfa_ioc_cb.c	86
-rw-r--r--	drivers/scsi/bfa/bfa_ioc_ct.c	36
-rw-r--r--	drivers/scsi/bfa/bfi.h	4
5 files changed, 150 insertions, 24 deletions
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 0116c1032e25..8928b68551c3 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -67,6 +67,14 @@ BFA_TRC_FILE(CNA, IOC);
 	((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
 #define bfa_ioc_sync_complete(__ioc)		\
 	((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
+	((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
+	((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
+#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
+	((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
+#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
+	((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
 
 #define bfa_ioc_mbox_cmd_pending(__ioc)	\
 	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -698,7 +706,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 	}
 
 	/* h/w sem init */
-	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
 	if (fwstate == BFI_IOC_UNINIT) {
 		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
 		goto sem_get;
@@ -725,8 +733,8 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 
 	bfa_trc(iocpf->ioc, fwstate);
 	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
-	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
-	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
+	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
+	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
 
 	/*
 	 * Unlock the hw semaphore. Should be here only once per boot.
@@ -1037,7 +1045,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 		 */
 
 	case IOCPF_E_TIMEOUT:
-		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
 
@@ -1138,7 +1146,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	case IOCPF_E_SEMLOCKED:
 		bfa_ioc_notify_fail(ioc);
 		bfa_ioc_sync_leave(ioc);
-		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 		writel(1, ioc->ioc_regs.ioc_sem_reg);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
 		break;
@@ -1227,7 +1235,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 		bfa_ioc_notify_fail(ioc);
 		if (!iocpf->auto_recover) {
 			bfa_ioc_sync_leave(ioc);
-			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 			writel(1, ioc->ioc_regs.ioc_sem_reg);
 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 		} else {
@@ -1519,7 +1527,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 	u32 boot_type;
 	u32 boot_env;
 
-	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
 
 	if (force)
 		ioc_fwstate = BFI_IOC_UNINIT;
@@ -2006,11 +2014,11 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 	 * Initialize IOC state of all functions on a chip reset.
 	 */
 	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
-		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
-		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
+		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
 	} else {
-		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
-		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
+		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
+		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
 	}
 
 	bfa_ioc_msgflush(ioc);
@@ -2038,7 +2046,7 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
 bfa_boolean_t
 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
 {
-	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
 
 	return ((r32 != BFI_IOC_UNINIT) &&
 		(r32 != BFI_IOC_INITING) &&
@@ -2430,12 +2438,12 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
 		return BFA_FALSE;
 
-	ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
+	ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
 	if (!bfa_ioc_state_disabled(ioc_state))
 		return BFA_FALSE;
 
 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
-		ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
+		ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
 		if (!bfa_ioc_state_disabled(ioc_state))
 			return BFA_FALSE;
 	}
@@ -2449,8 +2457,8 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 void
 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
 {
-	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
-	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
+	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
 }
 
 #define BFA_MFG_NAME "Brocade"
@@ -2917,7 +2925,7 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
 static void
 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
 {
-	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
 
 	bfa_trc(ioc, fwstate);
 
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 23a90e7b7107..de62b68beb64 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -346,6 +346,12 @@ struct bfa_ioc_hwif_s {
 	void		(*ioc_sync_ack)		(struct bfa_ioc_s *ioc);
 	bfa_boolean_t	(*ioc_sync_complete)	(struct bfa_ioc_s *ioc);
 	bfa_boolean_t	(*ioc_lpu_read_stat)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_set_fwstate)	(struct bfa_ioc_s *ioc,
+					enum bfi_ioc_state fwstate);
+	enum bfi_ioc_state	(*ioc_get_fwstate)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_set_alt_fwstate)	(struct bfa_ioc_s *ioc,
+					enum bfi_ioc_state fwstate);
+	enum bfi_ioc_state	(*ioc_get_alt_fwstate)	(struct bfa_ioc_s *ioc);
 };
 
 /*
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index 30df8a284715..e3b928746674 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -22,6 +22,8 @@
 
 BFA_TRC_FILE(CNA, IOC_CB);
 
+#define bfa_ioc_cb_join_pos(__ioc) ((u32) (1 << BFA_IOC_CB_JOIN_SH))
+
 /*
  * forward declarations
  */
@@ -37,6 +39,12 @@ static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_set_cur_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_set_alt_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
 
 static struct bfa_ioc_hwif_s hwif_cb;
 
@@ -59,6 +67,10 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 	hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
 	hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
 	hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
+	hwif_cb.ioc_set_fwstate = bfa_ioc_cb_set_cur_ioc_fwstate;
+	hwif_cb.ioc_get_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate;
+	hwif_cb.ioc_set_alt_fwstate = bfa_ioc_cb_set_alt_ioc_fwstate;
+	hwif_cb.ioc_get_alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate;
 
 	ioc->ioc_hwif = &hwif_cb;
 }
@@ -187,6 +199,20 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 static bfa_boolean_t
 bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
 {
+	u32 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/**
+	 * Driver load time.  If the join bit is set,
+	 * it is due to an unclean exit by the driver for this
+	 * PCI fn in the previous incarnation. Whoever comes here first
+	 * should clean it up, no matter which PCI fn.
+	 */
+	if (ioc_fwstate & BFA_IOC_CB_JOIN_MASK) {
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
 	return bfa_ioc_cb_sync_complete(ioc);
 }
 
@@ -212,24 +238,66 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
 static void
 bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
 {
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 join_pos = bfa_ioc_cb_join_pos(ioc);
+
+	writel((r32 | join_pos), ioc->ioc_regs.ioc_fwstate);
 }
 
 static void
 bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
 {
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 join_pos = bfa_ioc_cb_join_pos(ioc);
+
+	writel((r32 & ~join_pos), ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_cb_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
+			enum bfi_ioc_state fwstate)
+{
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+
+	writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
+		ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state)(readl(ioc->ioc_regs.ioc_fwstate) &
+			BFA_IOC_CB_FWSTATE_MASK);
+}
+
+static void
+bfa_ioc_cb_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
+			enum bfi_ioc_state fwstate)
+{
+	u32 r32 = readl(ioc->ioc_regs.alt_ioc_fwstate);
+
+	writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)),
+		ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state)(readl(ioc->ioc_regs.alt_ioc_fwstate) &
+			BFA_IOC_CB_FWSTATE_MASK);
 }
 
 static void
 bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
 {
-	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+	bfa_ioc_cb_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
 }
 
 static bfa_boolean_t
 bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
 {
-	uint32_t fwstate, alt_fwstate;
-	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 fwstate, alt_fwstate;
+	fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc);
 
 	/*
 	 * At this point, this IOC is hoding the hw sem in the
@@ -257,7 +325,7 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
 	    fwstate == BFI_IOC_OP)
 		return BFA_TRUE;
 	else {
-		alt_fwstate = readl(ioc->ioc_regs.alt_ioc_fwstate);
+		alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc);
 		if (alt_fwstate == BFI_IOC_FAIL ||
 		    alt_fwstate == BFI_IOC_DISABLED ||
 		    alt_fwstate == BFI_IOC_UNINIT ||
@@ -272,7 +340,7 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
 bfa_status_t
 bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
 {
-	u32 pll_sclk, pll_fclk;
+	u32 pll_sclk, pll_fclk, join_bits;
 
 	pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
 		__APP_PLL_SCLK_P0_1(3U) |
@@ -282,8 +350,12 @@ bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
 		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
 		__APP_PLL_LCLK_JITLMT0_1(3U) |
 		__APP_PLL_LCLK_CNTLMT0_1(3U);
-	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
-	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+	join_bits = readl(rb + BFA_IOC0_STATE_REG) &
+			BFA_IOC_CB_JOIN_MASK;
+	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG));
+	join_bits = readl(rb + BFA_IOC1_STATE_REG) &
+			BFA_IOC_CB_JOIN_MASK;
+	writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG));
 	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
 	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
 	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index a8e52a108710..bd53150e4ee0 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -43,6 +43,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_set_cur_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_set_alt_ioc_fwstate(
+			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
+static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);
 
 static struct bfa_ioc_hwif_s hwif_ct;
 static struct bfa_ioc_hwif_s hwif_ct2;
@@ -512,6 +518,10 @@ bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
 	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
 	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
 	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
+	hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
+	hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
+	hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
+	hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
 }
 
 /**
@@ -959,3 +969,29 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 
 	return BFA_STATUS_OK;
 }
+
+static void
+bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
+		enum bfi_ioc_state fwstate)
+{
+	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
+}
+
+static void
+bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
+		enum bfi_ioc_state fwstate)
+{
+	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
+static enum bfi_ioc_state
+bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
+{
+	return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate);
+}
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index e70e0832eafd..bf0a58d03be8 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -374,6 +374,10 @@ enum bfi_ioc_state {
 	BFI_IOC_MEMTEST = 9,	/* IOC is doing memtest */
 };
 
+#define BFA_IOC_CB_JOIN_SH	16
+#define BFA_IOC_CB_FWSTATE_MASK	0x0000ffff
+#define BFA_IOC_CB_JOIN_MASK	0xffff0000
+
 #define BFI_IOC_ENDIAN_SIG  0x12345678
 
 enum {
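The new defines above encode the CB-path register layout used throughout this
patch: bits 0-15 of the IOC state register hold the firmware state and bits
16-31 hold the per-function join bits, so the CB set/get helpers mask with
BFA_IOC_CB_FWSTATE_MASK and preserve BFA_IOC_CB_JOIN_MASK. A minimal sketch of
that read-modify-write follows, with a plain pointer standing in for the
driver's readl()/writel(); the helper names are illustrative, not driver code.

#include <stdint.h>

#define BFA_IOC_CB_FWSTATE_MASK	0x0000ffff	/* fw state: low 16 bits */
#define BFA_IOC_CB_JOIN_MASK	0xffff0000	/* join bits: high 16 bits */

/* Mirrors bfa_ioc_cb_set_cur_ioc_fwstate(): update the fw state while
 * leaving whatever join bits are currently set untouched. */
static void cb_set_fwstate(uint32_t *reg, uint32_t fwstate)
{
	uint32_t r32 = *reg;				/* stands in for readl() */

	*reg = fwstate | (r32 & BFA_IOC_CB_JOIN_MASK);	/* stands in for writel() */
}

/* Mirrors bfa_ioc_cb_get_cur_ioc_fwstate(): report only the fw state half. */
static uint32_t cb_get_fwstate(const uint32_t *reg)
{
	return *reg & BFA_IOC_CB_FWSTATE_MASK;
}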