path: root/drivers/net/bna/bfa_ioc.c
author     Rasesh Mody <rmody@brocade.com>          2010-12-23 16:45:09 -0500
committer  David S. Miller <davem@davemloft.net>    2010-12-25 22:16:03 -0500
commit     1d32f7696286eef9e5644eb57e79a36756274357 (patch)
tree       b31e78cc2c72ae9893ac8dea1401cff2b94e104e /drivers/net/bna/bfa_ioc.c
parent     aad75b66f1d3784514351f06bc589c55d5325bc8 (diff)
bna: IOC failure auto recovery fix
Change Details:
- Made IOC auto_recovery synchronized rather than timer based.
- Only one PCI function attempts to recover and reinitialize the ASIC on a
  failure, and only after all the active PCI functions have acknowledged the
  IOC failure.

Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
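The handshake described above can be pictured with a small, self-contained sketch. This is illustrative only: the register layout, the pf index, and every name below (sync_reg, sync_join, SYNC_ACK_SHIFT, ...) are assumptions, not the driver's actual implementation, which goes through the ioc_hwif sync callbacks added by this patch.

/*
 * Illustrative sketch only -- not driver code.  Each PCI function (PF)
 * owns one "join" bit and one "ack" bit in a shared scratch word.  A PF
 * joins when it initializes, acks when it observes an IOC failure, and
 * re-initialization proceeds only once every joined PF has acked.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t sync_reg;	/* stands in for the shared h/w scratch register */

#define SYNC_JOIN_SHIFT	0	/* bits  7:0 - PF has joined                */
#define SYNC_ACK_SHIFT	8	/* bits 15:8 - PF has acked the IOC failure */

static void sync_join(int pf)
{
	sync_reg |= 1u << (SYNC_JOIN_SHIFT + pf);
}

static void sync_leave(int pf)
{
	sync_reg &= ~(1u << (SYNC_JOIN_SHIFT + pf));
	sync_reg &= ~(1u << (SYNC_ACK_SHIFT + pf));
}

static void sync_ack(int pf)
{
	sync_reg |= 1u << (SYNC_ACK_SHIFT + pf);
}

/* True when no failure is pending or every joined PF has acked it. */
static bool sync_complete(void)
{
	uint32_t joined = (sync_reg >> SYNC_JOIN_SHIFT) & 0xff;
	uint32_t acked = (sync_reg >> SYNC_ACK_SHIFT) & 0xff;

	return acked == 0 || acked == joined;
}

In this picture, each surviving PF calls sync_ack() when it sees the failure, and the single PF that wins the hardware semaphore re-runs hardware init only once sync_complete() returns true; that is the behaviour the new iocpf fail_sync/initfail_sync states implement below with the real callbacks.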
Diffstat (limited to 'drivers/net/bna/bfa_ioc.c')
-rw-r--r--  drivers/net/bna/bfa_ioc.c  1174
1 file changed, 888 insertions, 286 deletions
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index 8ed147e803c3..34933cb9569f 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -26,25 +26,6 @@
26 * IOC local definitions 26 * IOC local definitions
27 */ 27 */
28 28
29#define bfa_ioc_timer_start(__ioc) \
30 mod_timer(&(__ioc)->ioc_timer, jiffies + \
31 msecs_to_jiffies(BFA_IOC_TOV))
32#define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
33
34#define bfa_ioc_recovery_timer_start(__ioc) \
35 mod_timer(&(__ioc)->ioc_timer, jiffies + \
36 msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
37
38#define bfa_sem_timer_start(__ioc) \
39 mod_timer(&(__ioc)->sem_timer, jiffies + \
40 msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
41#define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
42
43#define bfa_hb_timer_start(__ioc) \
44 mod_timer(&(__ioc)->hb_timer, jiffies + \
45 msecs_to_jiffies(BFA_IOC_HB_TOV))
46#define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
47
48/** 29/**
49 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 30 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
50 */ 31 */
@@ -55,8 +36,16 @@
55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 36 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 37#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 38#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58#define bfa_ioc_notify_hbfail(__ioc) \ 39#define bfa_ioc_notify_fail(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) 40 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
41#define bfa_ioc_sync_join(__ioc) \
42 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
43#define bfa_ioc_sync_leave(__ioc) \
44 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
45#define bfa_ioc_sync_ack(__ioc) \
46 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
47#define bfa_ioc_sync_complete(__ioc) \
48 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
60 49
61#define bfa_ioc_mbox_cmd_pending(__ioc) \ 50#define bfa_ioc_mbox_cmd_pending(__ioc) \
62 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ 51 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
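The four new bfa_ioc_sync_* macros (and the renamed bfa_ioc_notify_fail) dispatch through the per-ASIC ops table; the matching function-pointer members live in bfa_ioc.h, which is not part of this diff. A hedged sketch of what those members presumably look like follows -- the member names come straight from the macros, while the struct name, return types, and the omitted members are assumptions.

#include <stdbool.h>

struct bfa_ioc;				/* opaque here; defined in bfa_ioc.h */

/* Sketch of the sync-related callbacks the macros above dispatch to. */
struct example_ioc_hwif {
	/* ... existing callbacks such as ioc_reg_init, ioc_map_port ... */
	void (*ioc_notify_fail)(struct bfa_ioc *ioc);
	void (*ioc_sync_join)(struct bfa_ioc *ioc);
	void (*ioc_sync_leave)(struct bfa_ioc *ioc);
	void (*ioc_sync_ack)(struct bfa_ioc *ioc);
	bool (*ioc_sync_complete)(struct bfa_ioc *ioc);
};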
@@ -82,6 +71,12 @@ static void bfa_ioc_recover(struct bfa_ioc *ioc);
82static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); 71static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
83static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); 72static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
84static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); 73static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
74static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
75static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
76static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
77static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
78static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
79static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
85static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, 80static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
86 u32 boot_param); 81 u32 boot_param);
87static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); 82static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
@@ -100,69 +95,171 @@ static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
100static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); 95static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
101 96
102/** 97/**
103 * IOC state machine events 98 * IOC state machine definitions/declarations
104 */ 99 */
105enum ioc_event { 100enum ioc_event {
106 IOC_E_ENABLE = 1, /*!< IOC enable request */ 101 IOC_E_RESET = 1, /*!< IOC reset request */
107 IOC_E_DISABLE = 2, /*!< IOC disable request */ 102 IOC_E_ENABLE = 2, /*!< IOC enable request */
108 IOC_E_TIMEOUT = 3, /*!< f/w response timeout */ 103 IOC_E_DISABLE = 3, /*!< IOC disable request */
109 IOC_E_FWREADY = 4, /*!< f/w initialization done */ 104 IOC_E_DETACH = 4, /*!< driver detach cleanup */
110 IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */ 105 IOC_E_ENABLED = 5, /*!< f/w enabled */
111 IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */ 106 IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
112 IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */ 107 IOC_E_DISABLED = 7, /*!< f/w disabled */
113 IOC_E_HBFAIL = 8, /*!< heartbeat failure */ 108 IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */
114 IOC_E_HWERROR = 9, /*!< hardware error interrupt */ 109 IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */
115 IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ 110 IOC_E_HBFAIL = 10, /*!< heartbeat failure */
116 IOC_E_DETACH = 11, /*!< driver detach cleanup */ 111 IOC_E_HWERROR = 11, /*!< hardware error interrupt */
112 IOC_E_TIMEOUT = 12, /*!< timeout */
117}; 113};
118 114
115bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
119bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); 116bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
120bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
121bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
122bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
123bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
124bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); 117bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
125bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); 118bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
126bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); 119bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
127bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event); 120bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
128bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event); 121bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
129bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); 122bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
130bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); 123bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
131 124
132static struct bfa_sm_table ioc_sm_table[] = { 125static struct bfa_sm_table ioc_sm_table[] = {
126 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
133 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, 127 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
134 {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, 128 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
135 {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
136 {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
137 {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
138 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
139 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, 129 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
140 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, 130 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
141 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, 131 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
142 {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, 132 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
143 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 133 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
144 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 134 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
145}; 135};
146 136
147/** 137/**
138 * IOCPF state machine definitions/declarations
139 */
140
141/*
142 * Forward declarations for iocpf state machine
143 */
144static void bfa_iocpf_enable(struct bfa_ioc *ioc);
145static void bfa_iocpf_disable(struct bfa_ioc *ioc);
146static void bfa_iocpf_fail(struct bfa_ioc *ioc);
147static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
148static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
149static void bfa_iocpf_stop(struct bfa_ioc *ioc);
150
151/**
152 * IOCPF state machine events
153 */
154enum iocpf_event {
155 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
156 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
157 IOCPF_E_STOP = 3, /*!< stop on driver detach */
158 IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
159 IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
160 IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
161 IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
162 IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
163 IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
164 IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
165 IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
166};
167
168/**
169 * IOCPF states
170 */
171enum bfa_iocpf_state {
172 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
173 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
174 BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
175 BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
176 BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
177 BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
178 BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
179 BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
180 BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
181};
182
183bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
184bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
185bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
186bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
187bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
188bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
189bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
190bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
191 enum iocpf_event);
192bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
193bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
194bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
195bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
196bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
197 enum iocpf_event);
198bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
199
200static struct bfa_sm_table iocpf_sm_table[] = {
201 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
202 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
203 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
204 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
205 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
206 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
207 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
208 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
209 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
210 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
211 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
212 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
213 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
214 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
215};
216
217/**
218 * IOC State Machine
219 */
220
221/**
222 * Beginning state. IOC uninit state.
223 */
224static void
225bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
226{
227}
228
229/**
230 * IOC is in uninit state.
231 */
232static void
233bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
234{
235 switch (event) {
236 case IOC_E_RESET:
237 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
238 break;
239
240 default:
241 bfa_sm_fault(ioc, event);
242 }
243}
244
245/**
148 * Reset entry actions -- initialize state machine 246 * Reset entry actions -- initialize state machine
149 */ 247 */
150static void 248static void
151bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) 249bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
152{ 250{
153 ioc->retry_count = 0; 251 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
154 ioc->auto_recover = bfa_nw_auto_recover;
155} 252}
156 253
157/** 254/**
158 * Beginning state. IOC is in reset state. 255 * IOC is in reset state.
159 */ 256 */
160static void 257static void
161bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) 258bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
162{ 259{
163 switch (event) { 260 switch (event) {
164 case IOC_E_ENABLE: 261 case IOC_E_ENABLE:
165 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 262 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
166 break; 263 break;
167 264
168 case IOC_E_DISABLE: 265 case IOC_E_DISABLE:
@@ -170,6 +267,7 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
170 break; 267 break;
171 268
172 case IOC_E_DETACH: 269 case IOC_E_DETACH:
270 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
173 break; 271 break;
174 272
175 default: 273 default:
@@ -177,42 +275,43 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
177 } 275 }
178} 276}
179 277
180/**
181 * Semaphore should be acquired for version check.
182 */
183static void 278static void
184bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc) 279bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
185{ 280{
186 bfa_ioc_hw_sem_get(ioc); 281 bfa_iocpf_enable(ioc);
187} 282}
188 283
189/** 284/**
190 * Awaiting h/w semaphore to continue with version check. 285 * Host IOC function is being enabled, awaiting response from firmware.
286 * Semaphore is acquired.
191 */ 287 */
192static void 288static void
193bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event) 289bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
194{ 290{
195 switch (event) { 291 switch (event) {
196 case IOC_E_SEMLOCKED: 292 case IOC_E_ENABLED:
197 if (bfa_ioc_firmware_lock(ioc)) { 293 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
198 ioc->retry_count = 0; 294 break;
199 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 295
200 } else { 296 case IOC_E_PFAILED:
201 bfa_nw_ioc_hw_sem_release(ioc); 297 /* !!! fall through !!! */
202 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); 298 case IOC_E_HWERROR:
203 } 299 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
300 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
301 if (event != IOC_E_PFAILED)
302 bfa_iocpf_initfail(ioc);
204 break; 303 break;
205 304
206 case IOC_E_DISABLE: 305 case IOC_E_DISABLE:
207 bfa_ioc_disable_comp(ioc); 306 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
208 /* fall through */ 307 break;
209 308
210 case IOC_E_DETACH: 309 case IOC_E_DETACH:
211 bfa_ioc_hw_sem_get_cancel(ioc); 310 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
212 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 311 bfa_iocpf_stop(ioc);
213 break; 312 break;
214 313
215 case IOC_E_FWREADY: 314 case IOC_E_ENABLE:
216 break; 315 break;
217 316
218 default: 317 default:
@@ -221,41 +320,85 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
221} 320}
222 321
223/** 322/**
224 * Notify enable completion callback and generate mismatch AEN. 323 * Semaphore should be acquired for version check.
225 */ 324 */
226static void 325static void
227bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc) 326bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
228{ 327{
229 /** 328 mod_timer(&ioc->ioc_timer, jiffies +
230 * Provide enable completion callback and AEN notification only once. 329 msecs_to_jiffies(BFA_IOC_TOV));
231 */ 330 bfa_ioc_send_getattr(ioc);
232 if (ioc->retry_count == 0)
233 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
234 ioc->retry_count++;
235 bfa_ioc_timer_start(ioc);
236} 331}
237 332
238/** 333/**
239 * Awaiting firmware version match. 334 * IOC configuration in progress. Timer is active.
240 */ 335 */
241static void 336static void
242bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event) 337bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
243{ 338{
244 switch (event) { 339 switch (event) {
340 case IOC_E_FWRSP_GETATTR:
341 del_timer(&ioc->ioc_timer);
342 bfa_ioc_check_attr_wwns(ioc);
343 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
344 break;
345
346 case IOC_E_PFAILED:
347 case IOC_E_HWERROR:
348 del_timer(&ioc->ioc_timer);
349 /* fall through */
245 case IOC_E_TIMEOUT: 350 case IOC_E_TIMEOUT:
246 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 351 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
352 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
353 if (event != IOC_E_PFAILED)
354 bfa_iocpf_getattrfail(ioc);
247 break; 355 break;
248 356
249 case IOC_E_DISABLE: 357 case IOC_E_DISABLE:
250 bfa_ioc_disable_comp(ioc); 358 del_timer(&ioc->ioc_timer);
251 /* fall through */ 359 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
360 break;
252 361
253 case IOC_E_DETACH: 362 case IOC_E_ENABLE:
254 bfa_ioc_timer_stop(ioc); 363 break;
255 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 364
365 default:
366 bfa_sm_fault(ioc, event);
367 }
368}
369
370static void
371bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
372{
373 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
374 bfa_ioc_hb_monitor(ioc);
375}
376
377static void
378bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
379{
380 switch (event) {
381 case IOC_E_ENABLE:
382 break;
383
384 case IOC_E_DISABLE:
385 bfa_ioc_hb_stop(ioc);
386 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
256 break; 387 break;
257 388
258 case IOC_E_FWREADY: 389 case IOC_E_PFAILED:
390 case IOC_E_HWERROR:
391 bfa_ioc_hb_stop(ioc);
392 /* !!! fall through !!! */
393 case IOC_E_HBFAIL:
394 bfa_ioc_fail_notify(ioc);
395 if (ioc->iocpf.auto_recover)
396 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
397 else
398 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
399
400 if (event != IOC_E_PFAILED)
401 bfa_iocpf_fail(ioc);
259 break; 402 break;
260 403
261 default: 404 default:
@@ -263,30 +406,61 @@ bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
263 } 406 }
264} 407}
265 408
409static void
410bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
411{
412 bfa_iocpf_disable(ioc);
413}
414
266/** 415/**
267 * Request for semaphore. 416 * IOC is being disabled
268 */ 417 */
269static void 418static void
270bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc) 419bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
271{ 420{
272 bfa_ioc_hw_sem_get(ioc); 421 switch (event) {
422 case IOC_E_DISABLED:
423 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
424 break;
425
426 case IOC_E_HWERROR:
427 /*
428 * No state change. Will move to disabled state
429 * after iocpf sm completes failure processing and
430 * moves to disabled state.
431 */
432 bfa_iocpf_fail(ioc);
433 break;
434
435 default:
436 bfa_sm_fault(ioc, event);
437 }
273} 438}
274 439
275/** 440/**
276 * Awaiting semaphore for h/w initialzation. 441 * IOC disable completion entry.
277 */ 442 */
278static void 443static void
279bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event) 444bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
445{
446 bfa_ioc_disable_comp(ioc);
447}
448
449static void
450bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
280{ 451{
281 switch (event) { 452 switch (event) {
282 case IOC_E_SEMLOCKED: 453 case IOC_E_ENABLE:
283 ioc->retry_count = 0; 454 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
284 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
285 break; 455 break;
286 456
287 case IOC_E_DISABLE: 457 case IOC_E_DISABLE:
288 bfa_ioc_hw_sem_get_cancel(ioc); 458 ioc->cbfn->disable_cbfn(ioc->bfa);
289 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 459 break;
460
461 case IOC_E_DETACH:
462 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
463 bfa_iocpf_stop(ioc);
290 break; 464 break;
291 465
292 default: 466 default:
@@ -295,46 +469,45 @@ bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
295} 469}
296 470
297static void 471static void
298bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc) 472bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
299{ 473{
300 bfa_ioc_timer_start(ioc);
301 bfa_ioc_reset(ioc, false);
302} 474}
303 475
304/** 476/**
305 * @brief 477 * Hardware initialization retry.
306 * Hardware is being initialized. Interrupts are enabled.
307 * Holding hardware semaphore lock.
308 */ 478 */
309static void 479static void
310bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event) 480bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
311{ 481{
312 switch (event) { 482 switch (event) {
313 case IOC_E_FWREADY: 483 case IOC_E_ENABLED:
314 bfa_ioc_timer_stop(ioc); 484 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
315 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
316 break; 485 break;
317 486
487 case IOC_E_PFAILED:
318 case IOC_E_HWERROR: 488 case IOC_E_HWERROR:
319 bfa_ioc_timer_stop(ioc); 489 /**
320 /* fall through */ 490 * Initialization retry failed.
491 */
492 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
493 if (event != IOC_E_PFAILED)
494 bfa_iocpf_initfail(ioc);
495 break;
321 496
322 case IOC_E_TIMEOUT: 497 case IOC_E_INITFAILED:
323 ioc->retry_count++; 498 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
324 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 499 break;
325 bfa_ioc_timer_start(ioc);
326 bfa_ioc_reset(ioc, true);
327 break;
328 }
329 500
330 bfa_nw_ioc_hw_sem_release(ioc); 501 case IOC_E_ENABLE:
331 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
332 break; 502 break;
333 503
334 case IOC_E_DISABLE: 504 case IOC_E_DISABLE:
335 bfa_nw_ioc_hw_sem_release(ioc); 505 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
336 bfa_ioc_timer_stop(ioc); 506 break;
337 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 507
508 case IOC_E_DETACH:
509 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
510 bfa_iocpf_stop(ioc);
338 break; 511 break;
339 512
340 default: 513 default:
@@ -343,51 +516,248 @@ bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
343} 516}
344 517
345static void 518static void
346bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) 519bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
347{ 520{
348 bfa_ioc_timer_start(ioc);
349 bfa_ioc_send_enable(ioc);
350} 521}
351 522
352/** 523/**
353 * Host IOC function is being enabled, awaiting response from firmware. 524 * IOC failure.
354 * Semaphore is acquired.
355 */ 525 */
356static void 526static void
357bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) 527bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
358{ 528{
359 switch (event) { 529 switch (event) {
360 case IOC_E_FWRSP_ENABLE: 530 case IOC_E_ENABLE:
361 bfa_ioc_timer_stop(ioc); 531 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
362 bfa_nw_ioc_hw_sem_release(ioc); 532 break;
363 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 533
534 case IOC_E_DISABLE:
535 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
536 break;
537
538 case IOC_E_DETACH:
539 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
540 bfa_iocpf_stop(ioc);
364 break; 541 break;
365 542
366 case IOC_E_HWERROR: 543 case IOC_E_HWERROR:
367 bfa_ioc_timer_stop(ioc); 544 /* HB failure notification, ignore. */
368 /* fall through */ 545 break;
369 546
370 case IOC_E_TIMEOUT: 547 default:
371 ioc->retry_count++; 548 bfa_sm_fault(ioc, event);
372 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 549 }
373 writel(BFI_IOC_UNINIT, 550}
374 ioc->ioc_regs.ioc_fwstate); 551
375 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 552/**
376 break; 553 * IOCPF State Machine
554 */
555
556/**
557 * Reset entry actions -- initialize state machine
558 */
559static void
560bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
561{
562 iocpf->retry_count = 0;
563 iocpf->auto_recover = bfa_nw_auto_recover;
564}
565
566/**
567 * Beginning state. IOC is in reset state.
568 */
569static void
570bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
571{
572 switch (event) {
573 case IOCPF_E_ENABLE:
574 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
575 break;
576
577 case IOCPF_E_STOP:
578 break;
579
580 default:
581 bfa_sm_fault(iocpf->ioc, event);
582 }
583}
584
585/**
586 * Semaphore should be acquired for version check.
587 */
588static void
589bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
590{
591 bfa_ioc_hw_sem_get(iocpf->ioc);
592}
593
594/**
595 * Awaiting h/w semaphore to continue with version check.
596 */
597static void
598bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
599{
600 struct bfa_ioc *ioc = iocpf->ioc;
601
602 switch (event) {
603 case IOCPF_E_SEMLOCKED:
604 if (bfa_ioc_firmware_lock(ioc)) {
605 if (bfa_ioc_sync_complete(ioc)) {
606 iocpf->retry_count = 0;
607 bfa_ioc_sync_join(ioc);
608 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
609 } else {
610 bfa_ioc_firmware_unlock(ioc);
611 bfa_nw_ioc_hw_sem_release(ioc);
612 mod_timer(&ioc->sem_timer, jiffies +
613 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
614 }
615 } else {
616 bfa_nw_ioc_hw_sem_release(ioc);
617 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
377 } 618 }
619 break;
378 620
379 bfa_nw_ioc_hw_sem_release(ioc); 621 case IOCPF_E_DISABLE:
380 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 622 bfa_ioc_hw_sem_get_cancel(ioc);
623 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
624 bfa_ioc_pf_disabled(ioc);
381 break; 625 break;
382 626
383 case IOC_E_DISABLE: 627 case IOCPF_E_STOP:
384 bfa_ioc_timer_stop(ioc); 628 bfa_ioc_hw_sem_get_cancel(ioc);
629 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
630 break;
631
632 default:
633 bfa_sm_fault(ioc, event);
634 }
635}
636
637/**
638 * Notify enable completion callback
639 */
640static void
641bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
642{
643 /* Call only the first time sm enters fwmismatch state. */
644 if (iocpf->retry_count == 0)
645 bfa_ioc_pf_fwmismatch(iocpf->ioc);
646
647 iocpf->retry_count++;
648 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
649 msecs_to_jiffies(BFA_IOC_TOV));
650}
651
652/**
653 * Awaiting firmware version match.
654 */
655static void
656bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
657{
658 struct bfa_ioc *ioc = iocpf->ioc;
659
660 switch (event) {
661 case IOCPF_E_TIMEOUT:
662 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
663 break;
664
665 case IOCPF_E_DISABLE:
666 del_timer(&ioc->iocpf_timer);
667 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
668 bfa_ioc_pf_disabled(ioc);
669 break;
670
671 case IOCPF_E_STOP:
672 del_timer(&ioc->iocpf_timer);
673 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
674 break;
675
676 default:
677 bfa_sm_fault(ioc, event);
678 }
679}
680
681/**
682 * Request for semaphore.
683 */
684static void
685bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
686{
687 bfa_ioc_hw_sem_get(iocpf->ioc);
688}
689
690/**
691 * Awaiting semaphore for h/w initialization.
692 */
693static void
694bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
695{
696 struct bfa_ioc *ioc = iocpf->ioc;
697
698 switch (event) {
699 case IOCPF_E_SEMLOCKED:
700 if (bfa_ioc_sync_complete(ioc)) {
701 bfa_ioc_sync_join(ioc);
702 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
703 } else {
704 bfa_nw_ioc_hw_sem_release(ioc);
705 mod_timer(&ioc->sem_timer, jiffies +
706 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
707 }
708 break;
709
710 case IOCPF_E_DISABLE:
711 bfa_ioc_hw_sem_get_cancel(ioc);
712 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
713 break;
714
715 default:
716 bfa_sm_fault(ioc, event);
717 }
718}
719
720static void
721bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
722{
723 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
724 msecs_to_jiffies(BFA_IOC_TOV));
725 bfa_ioc_reset(iocpf->ioc, 0);
726}
727
728/**
729 * Hardware is being initialized. Interrupts are enabled.
730 * Holding hardware semaphore lock.
731 */
732static void
733bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
734{
735 struct bfa_ioc *ioc = iocpf->ioc;
736
737 switch (event) {
738 case IOCPF_E_FWREADY:
739 del_timer(&ioc->iocpf_timer);
740 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
741 break;
742
743 case IOCPF_E_INITFAIL:
744 del_timer(&ioc->iocpf_timer);
745 /*
746 * !!! fall through !!!
747 */
748
749 case IOCPF_E_TIMEOUT:
385 bfa_nw_ioc_hw_sem_release(ioc); 750 bfa_nw_ioc_hw_sem_release(ioc);
386 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 751 if (event == IOCPF_E_TIMEOUT)
752 bfa_ioc_pf_failed(ioc);
753 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
387 break; 754 break;
388 755
389 case IOC_E_FWREADY: 756 case IOCPF_E_DISABLE:
390 bfa_ioc_send_enable(ioc); 757 del_timer(&ioc->iocpf_timer);
758 bfa_ioc_sync_leave(ioc);
759 bfa_nw_ioc_hw_sem_release(ioc);
760 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
391 break; 761 break;
392 762
393 default: 763 default:
@@ -396,37 +766,49 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
396} 766}
397 767
398static void 768static void
399bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) 769bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
400{ 770{
401 bfa_ioc_timer_start(ioc); 771 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
402 bfa_ioc_send_getattr(ioc); 772 msecs_to_jiffies(BFA_IOC_TOV));
773 bfa_ioc_send_enable(iocpf->ioc);
403} 774}
404 775
405/** 776/**
406 * @brief 777 * Host IOC function is being enabled, awaiting response from firmware.
407 * IOC configuration in progress. Timer is active. 778 * Semaphore is acquired.
408 */ 779 */
409static void 780static void
410bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) 781bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
411{ 782{
783 struct bfa_ioc *ioc = iocpf->ioc;
784
412 switch (event) { 785 switch (event) {
413 case IOC_E_FWRSP_GETATTR: 786 case IOCPF_E_FWRSP_ENABLE:
414 bfa_ioc_timer_stop(ioc); 787 del_timer(&ioc->iocpf_timer);
415 bfa_ioc_check_attr_wwns(ioc); 788 bfa_nw_ioc_hw_sem_release(ioc);
416 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 789 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
417 break; 790 break;
418 791
419 case IOC_E_HWERROR: 792 case IOCPF_E_INITFAIL:
420 bfa_ioc_timer_stop(ioc); 793 del_timer(&ioc->iocpf_timer);
421 /* fall through */ 794 /*
795 * !!! fall through !!!
796 */
797 case IOCPF_E_TIMEOUT:
798 bfa_nw_ioc_hw_sem_release(ioc);
799 if (event == IOCPF_E_TIMEOUT)
800 bfa_ioc_pf_failed(ioc);
801 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
802 break;
422 803
423 case IOC_E_TIMEOUT: 804 case IOCPF_E_DISABLE:
424 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 805 del_timer(&ioc->iocpf_timer);
806 bfa_nw_ioc_hw_sem_release(ioc);
807 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
425 break; 808 break;
426 809
427 case IOC_E_DISABLE: 810 case IOCPF_E_FWREADY:
428 bfa_ioc_timer_stop(ioc); 811 bfa_ioc_send_enable(ioc);
429 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
430 break; 812 break;
431 813
432 default: 814 default:
@@ -434,36 +816,42 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
434 } 816 }
435} 817}
436 818
819static bool
820bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
821{
822 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
823}
824
437static void 825static void
438bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) 826bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
439{ 827{
440 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 828 bfa_ioc_pf_enabled(iocpf->ioc);
441 bfa_ioc_hb_monitor(ioc);
442} 829}
443 830
444static void 831static void
445bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) 832bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
446{ 833{
834 struct bfa_ioc *ioc = iocpf->ioc;
835
447 switch (event) { 836 switch (event) {
448 case IOC_E_ENABLE: 837 case IOCPF_E_DISABLE:
838 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
449 break; 839 break;
450 840
451 case IOC_E_DISABLE: 841 case IOCPF_E_GETATTRFAIL:
452 bfa_ioc_hb_stop(ioc); 842 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
453 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
454 break; 843 break;
455 844
456 case IOC_E_HWERROR: 845 case IOCPF_E_FAIL:
457 case IOC_E_FWREADY: 846 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
458 /** 847 break;
459 * Hard error or IOC recovery by other function.
460 * Treat it same as heartbeat failure.
461 */
462 bfa_ioc_hb_stop(ioc);
463 /* !!! fall through !!! */
464 848
465 case IOC_E_HBFAIL: 849 case IOCPF_E_FWREADY:
466 bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); 850 bfa_ioc_pf_failed(ioc);
851 if (bfa_nw_ioc_is_operational(ioc))
852 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
853 else
854 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
467 break; 855 break;
468 856
469 default: 857 default:
@@ -472,33 +860,40 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
472} 860}
473 861
474static void 862static void
475bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) 863bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
476{ 864{
477 bfa_ioc_timer_start(ioc); 865 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
478 bfa_ioc_send_disable(ioc); 866 msecs_to_jiffies(BFA_IOC_TOV));
867 bfa_ioc_send_disable(iocpf->ioc);
479} 868}
480 869
481/** 870/**
482 * IOC is being disabled 871 * IOC is being disabled
483 */ 872 */
484static void 873static void
485bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) 874bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
486{ 875{
876 struct bfa_ioc *ioc = iocpf->ioc;
877
487 switch (event) { 878 switch (event) {
488 case IOC_E_FWRSP_DISABLE: 879 case IOCPF_E_FWRSP_DISABLE:
489 bfa_ioc_timer_stop(ioc); 880 case IOCPF_E_FWREADY:
490 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 881 del_timer(&ioc->iocpf_timer);
882 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
491 break; 883 break;
492 884
493 case IOC_E_HWERROR: 885 case IOCPF_E_FAIL:
494 bfa_ioc_timer_stop(ioc); 886 del_timer(&ioc->iocpf_timer);
495 /* 887 /*
496 * !!! fall through !!! 888 * !!! fall through !!!
497 */ 889 */
498 890
499 case IOC_E_TIMEOUT: 891 case IOCPF_E_TIMEOUT:
500 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 892 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
501 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 893 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
894 break;
895
896 case IOCPF_E_FWRSP_ENABLE:
502 break; 897 break;
503 898
504 default: 899 default:
@@ -506,33 +901,58 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
506 } 901 }
507} 902}
508 903
509/**
510 * IOC disable completion entry.
511 */
512static void 904static void
513bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) 905bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
514{ 906{
515 bfa_ioc_disable_comp(ioc); 907 bfa_ioc_hw_sem_get(iocpf->ioc);
516} 908}
517 909
910/**
911 * IOC hb ack request is being removed.
912 */
518static void 913static void
519bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) 914bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
520{ 915{
916 struct bfa_ioc *ioc = iocpf->ioc;
917
521 switch (event) { 918 switch (event) {
522 case IOC_E_ENABLE: 919 case IOCPF_E_SEMLOCKED:
523 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 920 bfa_ioc_sync_leave(ioc);
921 bfa_nw_ioc_hw_sem_release(ioc);
922 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
524 break; 923 break;
525 924
526 case IOC_E_DISABLE: 925 case IOCPF_E_FAIL:
527 ioc->cbfn->disable_cbfn(ioc->bfa);
528 break; 926 break;
529 927
530 case IOC_E_FWREADY: 928 default:
929 bfa_sm_fault(ioc, event);
930 }
931}
932
933/**
934 * IOC disable completion entry.
935 */
936static void
937bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
938{
939 bfa_ioc_pf_disabled(iocpf->ioc);
940}
941
942static void
943bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
944{
945 struct bfa_ioc *ioc = iocpf->ioc;
946
947 switch (event) {
948 case IOCPF_E_ENABLE:
949 iocpf->retry_count = 0;
950 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
531 break; 951 break;
532 952
533 case IOC_E_DETACH: 953 case IOCPF_E_STOP:
534 bfa_ioc_firmware_unlock(ioc); 954 bfa_ioc_firmware_unlock(ioc);
535 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 955 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
536 break; 956 break;
537 957
538 default: 958 default:
@@ -541,33 +961,50 @@ bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
541} 961}
542 962
543static void 963static void
544bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc) 964bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
545{ 965{
546 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 966 bfa_ioc_hw_sem_get(iocpf->ioc);
547 bfa_ioc_timer_start(ioc);
548} 967}
549 968
550/** 969/**
551 * @brief
552 * Hardware initialization failed. 970 * Hardware initialization failed.
553 */ 971 */
554static void 972static void
555bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event) 973bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
556{ 974{
975 struct bfa_ioc *ioc = iocpf->ioc;
976
557 switch (event) { 977 switch (event) {
558 case IOC_E_DISABLE: 978 case IOCPF_E_SEMLOCKED:
559 bfa_ioc_timer_stop(ioc); 979 bfa_ioc_notify_fail(ioc);
560 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 980 bfa_ioc_sync_ack(ioc);
981 iocpf->retry_count++;
982 if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
983 bfa_ioc_sync_leave(ioc);
984 bfa_nw_ioc_hw_sem_release(ioc);
985 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
986 } else {
987 if (bfa_ioc_sync_complete(ioc))
988 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
989 else {
990 bfa_nw_ioc_hw_sem_release(ioc);
991 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
992 }
993 }
561 break; 994 break;
562 995
563 case IOC_E_DETACH: 996 case IOCPF_E_DISABLE:
564 bfa_ioc_timer_stop(ioc); 997 bfa_ioc_hw_sem_get_cancel(ioc);
998 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
999 break;
1000
1001 case IOCPF_E_STOP:
1002 bfa_ioc_hw_sem_get_cancel(ioc);
565 bfa_ioc_firmware_unlock(ioc); 1003 bfa_ioc_firmware_unlock(ioc);
566 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1004 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
567 break; 1005 break;
568 1006
569 case IOC_E_TIMEOUT: 1007 case IOCPF_E_FAIL:
570 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
571 break; 1008 break;
572 1009
573 default: 1010 default:
@@ -576,80 +1013,108 @@ bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
576} 1013}
577 1014
578static void 1015static void
579bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc) 1016bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
580{ 1017{
581 struct list_head *qe; 1018 bfa_ioc_pf_initfailed(iocpf->ioc);
582 struct bfa_ioc_hbfail_notify *notify; 1019}
583 1020
584 /** 1021/**
585 * Mark IOC as failed in hardware and stop firmware. 1022 * Hardware initialization failed.
586 */ 1023 */
587 bfa_ioc_lpu_stop(ioc); 1024static void
588 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 1025bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1026{
1027 struct bfa_ioc *ioc = iocpf->ioc;
589 1028
590 /** 1029 switch (event) {
591 * Notify other functions on HB failure. 1030 case IOCPF_E_DISABLE:
592 */ 1031 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
593 bfa_ioc_notify_hbfail(ioc); 1032 break;
594 1033
595 /** 1034 case IOCPF_E_STOP:
596 * Notify driver and common modules registered for notification. 1035 bfa_ioc_firmware_unlock(ioc);
597 */ 1036 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
598 ioc->cbfn->hbfail_cbfn(ioc->bfa); 1037 break;
599 list_for_each(qe, &ioc->hb_notify_q) { 1038
600 notify = (struct bfa_ioc_hbfail_notify *) qe; 1039 default:
601 notify->cbfn(notify->cbarg); 1040 bfa_sm_fault(ioc, event);
602 } 1041 }
1042}
603 1043
1044static void
1045bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1046{
604 /** 1047 /**
605 * Flush any queued up mailbox requests. 1048 * Mark IOC as failed in hardware and stop firmware.
606 */ 1049 */
607 bfa_ioc_mbox_hbfail(ioc); 1050 bfa_ioc_lpu_stop(iocpf->ioc);
608 1051
609 /** 1052 /**
610 * Trigger auto-recovery after a delay. 1053 * Flush any queued up mailbox requests.
611 */ 1054 */
612 if (ioc->auto_recover) 1055 bfa_ioc_mbox_hbfail(iocpf->ioc);
613 mod_timer(&ioc->ioc_timer, jiffies + 1056 bfa_ioc_hw_sem_get(iocpf->ioc);
614 msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
615} 1057}
616 1058
617/** 1059/**
618 * @brief 1060 * IOC is in failed state.
619 * IOC heartbeat failure.
620 */ 1061 */
621static void 1062static void
622bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event) 1063bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
623{ 1064{
624 switch (event) { 1065 struct bfa_ioc *ioc = iocpf->ioc;
625 1066
626 case IOC_E_ENABLE: 1067 switch (event) {
627 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1068 case IOCPF_E_SEMLOCKED:
1069 iocpf->retry_count = 0;
1070 bfa_ioc_sync_ack(ioc);
1071 bfa_ioc_notify_fail(ioc);
1072 if (!iocpf->auto_recover) {
1073 bfa_ioc_sync_leave(ioc);
1074 bfa_nw_ioc_hw_sem_release(ioc);
1075 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1076 } else {
1077 if (bfa_ioc_sync_complete(ioc))
1078 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1079 else {
1080 bfa_nw_ioc_hw_sem_release(ioc);
1081 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1082 }
1083 }
628 break; 1084 break;
629 1085
630 case IOC_E_DISABLE: 1086 case IOCPF_E_DISABLE:
631 if (ioc->auto_recover) 1087 bfa_ioc_hw_sem_get_cancel(ioc);
632 bfa_ioc_timer_stop(ioc); 1088 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
633 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
634 break; 1089 break;
635 1090
636 case IOC_E_TIMEOUT: 1091 case IOCPF_E_FAIL:
637 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
638 break; 1092 break;
639 1093
640 case IOC_E_FWREADY: 1094 default:
641 /** 1095 bfa_sm_fault(ioc, event);
642 * Recovery is already initiated by other function. 1096 }
643 */ 1097}
644 break;
645 1098
646 case IOC_E_HWERROR: 1099static void
647 /* 1100bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
648 * HB failure notification, ignore. 1101{
649 */ 1102}
1103
1104/**
1105 * @brief
1106 * IOC is in failed state.
1107 */
1108static void
1109bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1110{
1111 switch (event) {
1112 case IOCPF_E_DISABLE:
1113 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
650 break; 1114 break;
1115
651 default: 1116 default:
652 bfa_sm_fault(ioc, event); 1117 bfa_sm_fault(iocpf->ioc, event);
653 } 1118 }
654} 1119}
655 1120
@@ -674,14 +1139,6 @@ bfa_ioc_disable_comp(struct bfa_ioc *ioc)
674 } 1139 }
675} 1140}
676 1141
677void
678bfa_nw_ioc_sem_timeout(void *ioc_arg)
679{
680 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
681
682 bfa_ioc_hw_sem_get(ioc);
683}
684
685bool 1142bool
686bfa_nw_ioc_sem_get(void __iomem *sem_reg) 1143bfa_nw_ioc_sem_get(void __iomem *sem_reg)
687{ 1144{
@@ -721,7 +1178,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
721 */ 1178 */
722 r32 = readl(ioc->ioc_regs.ioc_sem_reg); 1179 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
723 if (r32 == 0) { 1180 if (r32 == 0) {
724 bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); 1181 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
725 return; 1182 return;
726 } 1183 }
727 1184
@@ -932,7 +1389,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
932 */ 1389 */
933 bfa_ioc_msgflush(ioc); 1390 bfa_ioc_msgflush(ioc);
934 ioc->cbfn->reset_cbfn(ioc->bfa); 1391 ioc->cbfn->reset_cbfn(ioc->bfa);
935 bfa_fsm_send_event(ioc, IOC_E_FWREADY); 1392 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
936 return; 1393 return;
937 } 1394 }
938 1395
@@ -1018,7 +1475,6 @@ bfa_nw_ioc_hb_check(void *cbarg)
1018 1475
1019 hb_count = readl(ioc->ioc_regs.heartbeat); 1476 hb_count = readl(ioc->ioc_regs.heartbeat);
1020 if (ioc->hb_count == hb_count) { 1477 if (ioc->hb_count == hb_count) {
1021 pr_crit("Firmware heartbeat failure at %d", hb_count);
1022 bfa_ioc_recover(ioc); 1478 bfa_ioc_recover(ioc);
1023 return; 1479 return;
1024 } else { 1480 } else {
@@ -1189,6 +1645,55 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
1189 bfa_q_deq(&mod->cmd_q, &cmd); 1645 bfa_q_deq(&mod->cmd_q, &cmd);
1190} 1646}
1191 1647
1648static void
1649bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1650{
1651 struct list_head *qe;
1652 struct bfa_ioc_hbfail_notify *notify;
1653
1654 /**
1655 * Notify driver and common modules registered for notification.
1656 */
1657 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1658 list_for_each(qe, &ioc->hb_notify_q) {
1659 notify = (struct bfa_ioc_hbfail_notify *) qe;
1660 notify->cbfn(notify->cbarg);
1661 }
1662}
1663
1664static void
1665bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1666{
1667 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1668}
1669
1670static void
1671bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1672{
1673 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1674}
1675
1676static void
1677bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
1678{
1679 bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
1680}
1681
1682static void
1683bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1684{
1685 bfa_fsm_send_event(ioc, IOC_E_PFAILED);
1686}
1687
1688static void
1689bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1690{
1691 /**
1692 * Provide enable completion callback and AEN notification.
1693 */
1694 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1695}
1696
1192/** 1697/**
1193 * IOC public 1698 * IOC public
1194 */ 1699 */
@@ -1284,6 +1789,7 @@ static void
1284bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) 1789bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1285{ 1790{
1286 union bfi_ioc_i2h_msg_u *msg; 1791 union bfi_ioc_i2h_msg_u *msg;
1792 struct bfa_iocpf *iocpf = &ioc->iocpf;
1287 1793
1288 msg = (union bfi_ioc_i2h_msg_u *) m; 1794 msg = (union bfi_ioc_i2h_msg_u *) m;
1289 1795
@@ -1294,15 +1800,15 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1294 break; 1800 break;
1295 1801
1296 case BFI_IOC_I2H_READY_EVENT: 1802 case BFI_IOC_I2H_READY_EVENT:
1297 bfa_fsm_send_event(ioc, IOC_E_FWREADY); 1803 bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1298 break; 1804 break;
1299 1805
1300 case BFI_IOC_I2H_ENABLE_REPLY: 1806 case BFI_IOC_I2H_ENABLE_REPLY:
1301 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); 1807 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1302 break; 1808 break;
1303 1809
1304 case BFI_IOC_I2H_DISABLE_REPLY: 1810 case BFI_IOC_I2H_DISABLE_REPLY:
1305 bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); 1811 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1306 break; 1812 break;
1307 1813
1308 case BFI_IOC_I2H_GETATTR_REPLY: 1814 case BFI_IOC_I2H_GETATTR_REPLY:
@@ -1328,11 +1834,13 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1328 ioc->fcmode = false; 1834 ioc->fcmode = false;
1329 ioc->pllinit = false; 1835 ioc->pllinit = false;
1330 ioc->dbg_fwsave_once = true; 1836 ioc->dbg_fwsave_once = true;
1837 ioc->iocpf.ioc = ioc;
1331 1838
1332 bfa_ioc_mbox_attach(ioc); 1839 bfa_ioc_mbox_attach(ioc);
1333 INIT_LIST_HEAD(&ioc->hb_notify_q); 1840 INIT_LIST_HEAD(&ioc->hb_notify_q);
1334 1841
1335 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1842 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1843 bfa_fsm_send_event(ioc, IOC_E_RESET);
1336} 1844}
1337 1845
1338/** 1846/**
@@ -1637,7 +2145,40 @@ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
1637static enum bfa_ioc_state 2145static enum bfa_ioc_state
1638bfa_ioc_get_state(struct bfa_ioc *ioc) 2146bfa_ioc_get_state(struct bfa_ioc *ioc)
1639{ 2147{
1640 return bfa_sm_to_state(ioc_sm_table, ioc->fsm); 2148 enum bfa_iocpf_state iocpf_st;
2149 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2150
2151 if (ioc_st == BFA_IOC_ENABLING ||
2152 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2153
2154 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2155
2156 switch (iocpf_st) {
2157 case BFA_IOCPF_SEMWAIT:
2158 ioc_st = BFA_IOC_SEMWAIT;
2159 break;
2160
2161 case BFA_IOCPF_HWINIT:
2162 ioc_st = BFA_IOC_HWINIT;
2163 break;
2164
2165 case BFA_IOCPF_FWMISMATCH:
2166 ioc_st = BFA_IOC_FWMISMATCH;
2167 break;
2168
2169 case BFA_IOCPF_FAIL:
2170 ioc_st = BFA_IOC_FAIL;
2171 break;
2172
2173 case BFA_IOCPF_INITFAIL:
2174 ioc_st = BFA_IOC_INITFAIL;
2175 break;
2176
2177 default:
2178 break;
2179 }
2180 }
2181 return ioc_st;
1641} 2182}
1642 2183
1643void 2184void
@@ -1678,8 +2219,13 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
1678static void 2219static void
1679bfa_ioc_recover(struct bfa_ioc *ioc) 2220bfa_ioc_recover(struct bfa_ioc *ioc)
1680{ 2221{
1681 bfa_ioc_stats(ioc, ioc_hbfails); 2222 u16 bdf;
1682 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2223
2224 bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
2225 ioc->pcidev.device_id);
2226
2227 pr_crit("Firmware heartbeat failure at %d", bdf);
2228 BUG_ON(1);
1683} 2229}
1684 2230
1685static void 2231static void
@@ -1687,5 +2233,61 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
1687{ 2233{
1688 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) 2234 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
1689 return; 2235 return;
2236}
2237
2238/**
2239 * @dg hal_iocpf_pvt BFA IOC PF private functions
2240 * @{
2241 */
2242
2243static void
2244bfa_iocpf_enable(struct bfa_ioc *ioc)
2245{
2246 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2247}
1690 2248
2249static void
2250bfa_iocpf_disable(struct bfa_ioc *ioc)
2251{
2252 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2253}
2254
2255static void
2256bfa_iocpf_fail(struct bfa_ioc *ioc)
2257{
2258 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2259}
2260
2261static void
2262bfa_iocpf_initfail(struct bfa_ioc *ioc)
2263{
2264 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2265}
2266
2267static void
2268bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2269{
2270 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2271}
2272
2273static void
2274bfa_iocpf_stop(struct bfa_ioc *ioc)
2275{
2276 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2277}
2278
2279void
2280bfa_nw_iocpf_timeout(void *ioc_arg)
2281{
2282 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2283
2284 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2285}
2286
2287void
2288bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2289{
2290 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2291
2292 bfa_ioc_hw_sem_get(ioc);
1691} 2293}