Diffstat (limited to 'drivers/scsi/bfa/bfa_ioc.c')
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 1888
1 file changed, 1349 insertions(+), 539 deletions(-)
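At a high level, this change splits the single IOC state machine into an IOC state machine plus a new IOCPF (per physical function) state machine: the IOC FSM hands hardware bring-up to the IOCPF FSM and hears back through the bfa_ioc_pf_*() notifications declared in the hunks below. The two helpers here are not part of the hunks shown; they are a minimal sketch, assuming bfa_fsm_send_event() is the event-posting primitive, of how that delegation is typically wired.

static void
bfa_iocpf_enable(struct bfa_ioc_s *ioc)
{
	/* forward the IOC enable request to the IOCPF state machine */
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
{
	/* IOCPF reports ready: drive the IOC state machine forward */
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}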
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 8e78f20110a5..6795b247791a 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. | 2 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. |
3 | * All rights reserved | 3 | * All rights reserved |
4 | * www.brocade.com | 4 | * www.brocade.com |
5 | * | 5 | * |
@@ -15,35 +15,33 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <bfa.h> | 18 | #include "bfa_ioc.h" |
19 | #include <bfa_ioc.h> | 19 | #include "bfi_ctreg.h" |
20 | #include <bfa_fwimg_priv.h> | 20 | #include "bfa_defs.h" |
21 | #include <cna/bfa_cna_trcmod.h> | 21 | #include "bfa_defs_svc.h" |
22 | #include <cs/bfa_debug.h> | 22 | #include "bfad_drv.h" |
23 | #include <bfi/bfi_ioc.h> | ||
24 | #include <bfi/bfi_ctreg.h> | ||
25 | #include <aen/bfa_aen_ioc.h> | ||
26 | #include <aen/bfa_aen.h> | ||
27 | #include <log/bfa_log_hal.h> | ||
28 | #include <defs/bfa_defs_pci.h> | ||
29 | 23 | ||
30 | BFA_TRC_FILE(CNA, IOC); | 24 | BFA_TRC_FILE(CNA, IOC); |
31 | 25 | ||
32 | /** | 26 | /** |
33 | * IOC local definitions | 27 | * IOC local definitions |
34 | */ | 28 | */ |
35 | #define BFA_IOC_TOV 2000 /* msecs */ | 29 | #define BFA_IOC_TOV 3000 /* msecs */ |
36 | #define BFA_IOC_HWSEM_TOV 500 /* msecs */ | 30 | #define BFA_IOC_HWSEM_TOV 500 /* msecs */ |
37 | #define BFA_IOC_HB_TOV 500 /* msecs */ | 31 | #define BFA_IOC_HB_TOV 500 /* msecs */ |
38 | #define BFA_IOC_HWINIT_MAX 2 | 32 | #define BFA_IOC_HWINIT_MAX 2 |
39 | #define BFA_IOC_FWIMG_MINSZ (16 * 1024) | 33 | #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV |
40 | #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV | ||
41 | 34 | ||
42 | #define bfa_ioc_timer_start(__ioc) \ | 35 | #define bfa_ioc_timer_start(__ioc) \ |
43 | bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ | 36 | bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ |
44 | bfa_ioc_timeout, (__ioc), BFA_IOC_TOV) | 37 | bfa_ioc_timeout, (__ioc), BFA_IOC_TOV) |
45 | #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) | 38 | #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) |
46 | 39 | ||
40 | #define bfa_hb_timer_start(__ioc) \ | ||
41 | bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \ | ||
42 | bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV) | ||
43 | #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer) | ||
44 | |||
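A usage sketch for the new heartbeat timer macros (not part of this hunk; the hb_count and ioc_regs.heartbeat names are assumptions based on how the heartbeat is handled elsewhere in the driver):

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	/* latch the current heartbeat count, then arm the HB timer */
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}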
47 | #define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) | 45 | #define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) |
48 | #define BFA_DBG_FWTRC_LEN \ | 46 | #define BFA_DBG_FWTRC_LEN \ |
49 | (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ | 47 | (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ |
@@ -55,100 +53,226 @@ BFA_TRC_FILE(CNA, IOC); | |||
55 | * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. | 53 | * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. |
56 | */ | 54 | */ |
57 | 55 | ||
58 | #define bfa_ioc_firmware_lock(__ioc) \ | 56 | #define bfa_ioc_firmware_lock(__ioc) \ |
59 | ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) | 57 | ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) |
60 | #define bfa_ioc_firmware_unlock(__ioc) \ | 58 | #define bfa_ioc_firmware_unlock(__ioc) \ |
61 | ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) | 59 | ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) |
62 | #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) | 60 | #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) |
63 | #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) | 61 | #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) |
64 | #define bfa_ioc_notify_hbfail(__ioc) \ | 62 | #define bfa_ioc_notify_hbfail(__ioc) \ |
65 | ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) | 63 | ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) |
66 | #define bfa_ioc_is_optrom(__ioc) \ | ||
67 | (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ) | ||
68 | 64 | ||
69 | bfa_boolean_t bfa_auto_recover = BFA_TRUE; | 65 | #ifdef BFA_IOC_IS_UEFI |
66 | #define bfa_ioc_is_bios_optrom(__ioc) (0) | ||
67 | #define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI | ||
68 | #else | ||
69 | #define bfa_ioc_is_bios_optrom(__ioc) \ | ||
70 | (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ) | ||
71 | #define bfa_ioc_is_uefi(__ioc) (0) | ||
72 | #endif | ||
73 | |||
74 | #define bfa_ioc_mbox_cmd_pending(__ioc) \ | ||
75 | (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ | ||
76 | bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd)) | ||
77 | |||
78 | bfa_boolean_t bfa_auto_recover = BFA_TRUE; | ||
70 | 79 | ||
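The new bfa_ioc_mbox_cmd_pending() check above is the kind of guard a mailbox submit path uses to decide between writing to hardware and queueing. A hedged sketch of that pattern — the function name bfa_ioc_mbox_try_send(), the send helper, and the bfa_mbox_cmd_s field names are placeholders, not taken from this patch:

static void
bfa_ioc_mbox_try_send(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	if (bfa_ioc_mbox_cmd_pending(ioc)) {
		/* mailbox busy or older commands queued: keep it queued */
		list_add_tail(&cmd->qe, &ioc->mbox_mod.cmd_q);
		return;
	}

	/* mailbox free: copy the message out to the hardware (placeholder) */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}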
71 | /* | 80 | /* |
72 | * forward declarations | 81 | * forward declarations |
73 | */ | 82 | */ |
74 | static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); | 83 | static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); |
75 | static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); | 84 | static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); |
76 | static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); | 85 | static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); |
77 | static void bfa_ioc_timeout(void *ioc); | 86 | static void bfa_ioc_timeout(void *ioc); |
78 | static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); | 87 | static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); |
79 | static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); | 88 | static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); |
80 | static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); | 89 | static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); |
81 | static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); | 90 | static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); |
82 | static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc); | 91 | static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc); |
83 | static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); | 92 | static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); |
84 | static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); | 93 | static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); |
85 | static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); | 94 | static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); |
86 | static void bfa_ioc_recover(struct bfa_ioc_s *ioc); | 95 | static void bfa_ioc_recover(struct bfa_ioc_s *ioc); |
87 | static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); | 96 | static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); |
88 | static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); | 97 | static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); |
89 | static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); | 98 | static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); |
99 | static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc); | ||
100 | static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc); | ||
101 | static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc); | ||
102 | static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); | ||
90 | 103 | ||
91 | /** | 104 | /** |
92 | * bfa_ioc_sm | 105 | * hal_ioc_sm |
93 | */ | 106 | */ |
94 | 107 | ||
95 | /** | 108 | /** |
96 | * IOC state machine events | 109 | * IOC state machine definitions/declarations |
97 | */ | 110 | */ |
98 | enum ioc_event { | 111 | enum ioc_event { |
99 | IOC_E_ENABLE = 1, /* IOC enable request */ | 112 | IOC_E_RESET = 1, /* IOC reset request */ |
100 | IOC_E_DISABLE = 2, /* IOC disable request */ | 113 | IOC_E_ENABLE = 2, /* IOC enable request */ |
101 | IOC_E_TIMEOUT = 3, /* f/w response timeout */ | 114 | IOC_E_DISABLE = 3, /* IOC disable request */ |
102 | IOC_E_FWREADY = 4, /* f/w initialization done */ | 115 | IOC_E_DETACH = 4, /* driver detach cleanup */ |
103 | IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */ | 116 | IOC_E_ENABLED = 5, /* f/w enabled */ |
104 | IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */ | 117 | IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */ |
105 | IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */ | 118 | IOC_E_DISABLED = 7, /* f/w disabled */ |
106 | IOC_E_HBFAIL = 8, /* heartbeat failure */ | 119 | IOC_E_FAILED = 8, /* failure notice by iocpf sm */ |
107 | IOC_E_HWERROR = 9, /* hardware error interrupt */ | 120 | IOC_E_HBFAIL = 9, /* heartbeat failure */ |
108 | IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */ | 121 | IOC_E_HWERROR = 10, /* hardware error interrupt */ |
109 | IOC_E_DETACH = 11, /* driver detach cleanup */ | 122 | IOC_E_TIMEOUT = 11, /* timeout */ |
110 | }; | 123 | }; |
111 | 124 | ||
125 | bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); | ||
112 | bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); | 126 | bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); |
113 | bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event); | ||
114 | bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event); | ||
115 | bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event); | ||
116 | bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event); | ||
117 | bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); | 127 | bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); |
118 | bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); | 128 | bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); |
119 | bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); | 129 | bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); |
120 | bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event); | 130 | bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event); |
121 | bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event); | 131 | bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); |
122 | bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); | 132 | bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); |
123 | bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); | 133 | bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); |
124 | 134 | ||
125 | static struct bfa_sm_table_s ioc_sm_table[] = { | 135 | static struct bfa_sm_table_s ioc_sm_table[] = { |
136 | {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, | ||
126 | {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, | 137 | {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, |
127 | {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, | 138 | {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, |
128 | {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH}, | ||
129 | {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT}, | ||
130 | {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT}, | ||
131 | {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT}, | ||
132 | {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, | 139 | {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, |
133 | {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, | 140 | {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, |
134 | {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, | 141 | {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, |
135 | {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, | 142 | {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, |
136 | {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, | 143 | {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, |
137 | {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, | 144 | {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, |
138 | }; | 145 | }; |
139 | 146 | ||
140 | /** | 147 | /** |
148 | * IOCPF state machine definitions/declarations | ||
149 | */ | ||
150 | |||
151 | #define bfa_iocpf_timer_start(__ioc) \ | ||
152 | bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ | ||
153 | bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV) | ||
154 | #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) | ||
155 | |||
156 | #define bfa_iocpf_recovery_timer_start(__ioc) \ | ||
157 | bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ | ||
158 | bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER) | ||
159 | |||
160 | #define bfa_sem_timer_start(__ioc) \ | ||
161 | bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \ | ||
162 | bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV) | ||
163 | #define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer) | ||
164 | |||
165 | /* | ||
166 | * Forward declarations for iocpf state machine | ||
167 | */ | ||
168 | static void bfa_iocpf_enable(struct bfa_ioc_s *ioc); | ||
169 | static void bfa_iocpf_disable(struct bfa_ioc_s *ioc); | ||
170 | static void bfa_iocpf_fail(struct bfa_ioc_s *ioc); | ||
171 | static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc); | ||
172 | static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc); | ||
173 | static void bfa_iocpf_stop(struct bfa_ioc_s *ioc); | ||
174 | static void bfa_iocpf_timeout(void *ioc_arg); | ||
175 | static void bfa_iocpf_sem_timeout(void *ioc_arg); | ||
176 | |||
177 | /** | ||
178 | * IOCPF state machine events | ||
179 | */ | ||
180 | enum iocpf_event { | ||
181 | IOCPF_E_ENABLE = 1, /* IOCPF enable request */ | ||
182 | IOCPF_E_DISABLE = 2, /* IOCPF disable request */ | ||
183 | IOCPF_E_STOP = 3, /* stop on driver detach */ | ||
184 | IOCPF_E_FWREADY = 4, /* f/w initialization done */ | ||
185 | IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */ | ||
186 | IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */ | ||
187 | IOCPF_E_FAIL = 7, /* failure notice by ioc sm */ | ||
188 | IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */ | ||
189 | IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */ | ||
190 | IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */ | ||
191 | IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ | ||
192 | }; | ||
193 | |||
194 | /** | ||
195 | * IOCPF states | ||
196 | */ | ||
197 | enum bfa_iocpf_state { | ||
198 | BFA_IOCPF_RESET = 1, /* IOC is in reset state */ | ||
199 | BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */ | ||
200 | BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */ | ||
201 | BFA_IOCPF_READY = 4, /* IOCPF is initialized */ | ||
202 | BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */ | ||
203 | BFA_IOCPF_FAIL = 6, /* IOCPF failed */ | ||
204 | BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */ | ||
205 | BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */ | ||
206 | BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */ | ||
207 | }; | ||
208 | |||
209 | bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event); | ||
210 | bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event); | ||
211 | bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event); | ||
212 | bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event); | ||
213 | bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event); | ||
214 | bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event); | ||
215 | bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event); | ||
216 | bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event); | ||
217 | bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event); | ||
218 | bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event); | ||
219 | bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event); | ||
220 | |||
221 | static struct bfa_sm_table_s iocpf_sm_table[] = { | ||
222 | {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, | ||
223 | {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, | ||
224 | {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, | ||
225 | {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, | ||
226 | {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, | ||
227 | {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, | ||
228 | {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, | ||
229 | {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, | ||
230 | {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, | ||
231 | {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, | ||
232 | {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, | ||
233 | }; | ||
234 | |||
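Both tables above pair each state-handler function with the state value reported to the outside world. A minimal sketch of how such a table is consulted (assuming the driver's existing bfa_sm_to_state() helper and an fsm field inside struct bfa_iocpf_s):

enum bfa_iocpf_state
bfa_iocpf_get_state(struct bfa_ioc_s *ioc)
{
	/* map the current IOCPF state handler to its reported enum */
	return bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
}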
235 | /** | ||
236 | * IOC State Machine | ||
237 | */ | ||
238 | |||
239 | /** | ||
240 | * Beginning state. IOC uninit state. | ||
241 | */ | ||
242 | |||
243 | static void | ||
244 | bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc) | ||
245 | { | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * IOC is in uninit state. | ||
250 | */ | ||
251 | static void | ||
252 | bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event) | ||
253 | { | ||
254 | bfa_trc(ioc, event); | ||
255 | |||
256 | switch (event) { | ||
257 | case IOC_E_RESET: | ||
258 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | ||
259 | break; | ||
260 | |||
261 | default: | ||
262 | bfa_sm_fault(ioc, event); | ||
263 | } | ||
264 | } | ||
265 | /** | ||
141 | * Reset entry actions -- initialize state machine | 266 | * Reset entry actions -- initialize state machine |
142 | */ | 267 | */ |
143 | static void | 268 | static void |
144 | bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) | 269 | bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) |
145 | { | 270 | { |
146 | ioc->retry_count = 0; | 271 | bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); |
147 | ioc->auto_recover = bfa_auto_recover; | ||
148 | } | 272 | } |
149 | 273 | ||
150 | /** | 274 | /** |
151 | * Beginning state. IOC is in reset state. | 275 | * IOC is in reset state. |
152 | */ | 276 | */ |
153 | static void | 277 | static void |
154 | bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) | 278 | bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) |
@@ -157,7 +281,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
157 | 281 | ||
158 | switch (event) { | 282 | switch (event) { |
159 | case IOC_E_ENABLE: | 283 | case IOC_E_ENABLE: |
160 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); | 284 | bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); |
161 | break; | 285 | break; |
162 | 286 | ||
163 | case IOC_E_DISABLE: | 287 | case IOC_E_DISABLE: |
@@ -165,6 +289,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
165 | break; | 289 | break; |
166 | 290 | ||
167 | case IOC_E_DETACH: | 291 | case IOC_E_DETACH: |
292 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | ||
168 | break; | 293 | break; |
169 | 294 | ||
170 | default: | 295 | default: |
@@ -172,46 +297,209 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
172 | } | 297 | } |
173 | } | 298 | } |
174 | 299 | ||
300 | |||
301 | static void | ||
302 | bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) | ||
303 | { | ||
304 | bfa_iocpf_enable(ioc); | ||
305 | } | ||
306 | |||
175 | /** | 307 | /** |
176 | * Semaphore should be acquired for version check. | 308 | * Host IOC function is being enabled, awaiting response from firmware. |
309 | * Semaphore is acquired. | ||
177 | */ | 310 | */ |
178 | static void | 311 | static void |
179 | bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc) | 312 | bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) |
180 | { | 313 | { |
181 | bfa_ioc_hw_sem_get(ioc); | 314 | bfa_trc(ioc, event); |
315 | |||
316 | switch (event) { | ||
317 | case IOC_E_ENABLED: | ||
318 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); | ||
319 | break; | ||
320 | |||
321 | case IOC_E_FAILED: | ||
322 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | ||
323 | break; | ||
324 | |||
325 | case IOC_E_HWERROR: | ||
326 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | ||
327 | bfa_iocpf_initfail(ioc); | ||
328 | break; | ||
329 | |||
330 | case IOC_E_DISABLE: | ||
331 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | ||
332 | break; | ||
333 | |||
334 | case IOC_E_DETACH: | ||
335 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | ||
336 | bfa_iocpf_stop(ioc); | ||
337 | break; | ||
338 | |||
339 | case IOC_E_ENABLE: | ||
340 | break; | ||
341 | |||
342 | default: | ||
343 | bfa_sm_fault(ioc, event); | ||
344 | } | ||
345 | } | ||
346 | |||
347 | |||
348 | static void | ||
349 | bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) | ||
350 | { | ||
351 | bfa_ioc_timer_start(ioc); | ||
352 | bfa_ioc_send_getattr(ioc); | ||
182 | } | 353 | } |
183 | 354 | ||
184 | /** | 355 | /** |
185 | * Awaiting h/w semaphore to continue with version check. | 356 | * IOC configuration in progress. Timer is active. |
186 | */ | 357 | */ |
187 | static void | 358 | static void |
188 | bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event) | 359 | bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) |
189 | { | 360 | { |
190 | bfa_trc(ioc, event); | 361 | bfa_trc(ioc, event); |
191 | 362 | ||
192 | switch (event) { | 363 | switch (event) { |
193 | case IOC_E_SEMLOCKED: | 364 | case IOC_E_FWRSP_GETATTR: |
194 | if (bfa_ioc_firmware_lock(ioc)) { | 365 | bfa_ioc_timer_stop(ioc); |
195 | ioc->retry_count = 0; | 366 | bfa_ioc_check_attr_wwns(ioc); |
196 | bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); | 367 | bfa_fsm_set_state(ioc, bfa_ioc_sm_op); |
197 | } else { | 368 | break; |
198 | bfa_ioc_hw_sem_release(ioc); | 369 | |
199 | bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); | 370 | case IOC_E_FAILED: |
200 | } | 371 | bfa_ioc_timer_stop(ioc); |
372 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | ||
373 | break; | ||
374 | |||
375 | case IOC_E_HWERROR: | ||
376 | bfa_ioc_timer_stop(ioc); | ||
377 | /* fall through */ | ||
378 | |||
379 | case IOC_E_TIMEOUT: | ||
380 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | ||
381 | bfa_iocpf_getattrfail(ioc); | ||
201 | break; | 382 | break; |
202 | 383 | ||
203 | case IOC_E_DISABLE: | 384 | case IOC_E_DISABLE: |
204 | bfa_ioc_disable_comp(ioc); | 385 | bfa_ioc_timer_stop(ioc); |
386 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | ||
387 | break; | ||
388 | |||
389 | case IOC_E_ENABLE: | ||
390 | break; | ||
391 | |||
392 | default: | ||
393 | bfa_sm_fault(ioc, event); | ||
394 | } | ||
395 | } | ||
396 | |||
397 | |||
398 | static void | ||
399 | bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) | ||
400 | { | ||
401 | struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; | ||
402 | |||
403 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); | ||
404 | bfa_ioc_hb_monitor(ioc); | ||
405 | BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n"); | ||
406 | } | ||
407 | |||
408 | static void | ||
409 | bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) | ||
410 | { | ||
411 | bfa_trc(ioc, event); | ||
412 | |||
413 | switch (event) { | ||
414 | case IOC_E_ENABLE: | ||
415 | break; | ||
416 | |||
417 | case IOC_E_DISABLE: | ||
418 | bfa_ioc_hb_stop(ioc); | ||
419 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | ||
420 | break; | ||
421 | |||
422 | case IOC_E_FAILED: | ||
423 | bfa_ioc_hb_stop(ioc); | ||
424 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); | ||
425 | break; | ||
426 | |||
427 | case IOC_E_HWERROR: | ||
428 | bfa_ioc_hb_stop(ioc); | ||
429 | /* !!! fall through !!! */ | ||
430 | |||
431 | case IOC_E_HBFAIL: | ||
432 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); | ||
433 | bfa_iocpf_fail(ioc); | ||
434 | break; | ||
435 | |||
436 | default: | ||
437 | bfa_sm_fault(ioc, event); | ||
438 | } | ||
439 | } | ||
440 | |||
441 | |||
442 | static void | ||
443 | bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) | ||
444 | { | ||
445 | struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; | ||
446 | bfa_iocpf_disable(ioc); | ||
447 | BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n"); | ||
448 | } | ||
449 | |||
450 | /** | ||
451 | * IOC is being disabled | ||
452 | */ | ||
453 | static void | ||
454 | bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) | ||
455 | { | ||
456 | bfa_trc(ioc, event); | ||
457 | |||
458 | switch (event) { | ||
459 | case IOC_E_DISABLED: | ||
460 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | ||
461 | break; | ||
462 | |||
463 | case IOC_E_HWERROR: | ||
205 | /* | 464 | /* |
206 | * fall through | 465 | * No state change. Will move to disabled state |
466 | * after iocpf sm completes failure processing and | ||
467 | * moves to disabled state. | ||
207 | */ | 468 | */ |
469 | bfa_iocpf_fail(ioc); | ||
470 | break; | ||
208 | 471 | ||
209 | case IOC_E_DETACH: | 472 | default: |
210 | bfa_ioc_hw_sem_get_cancel(ioc); | 473 | bfa_sm_fault(ioc, event); |
211 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 474 | } |
475 | } | ||
476 | |||
477 | /** | ||
478 | * IOC disable completion entry. | ||
479 | */ | ||
480 | static void | ||
481 | bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) | ||
482 | { | ||
483 | bfa_ioc_disable_comp(ioc); | ||
484 | } | ||
485 | |||
486 | static void | ||
487 | bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) | ||
488 | { | ||
489 | bfa_trc(ioc, event); | ||
490 | |||
491 | switch (event) { | ||
492 | case IOC_E_ENABLE: | ||
493 | bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); | ||
494 | break; | ||
495 | |||
496 | case IOC_E_DISABLE: | ||
497 | ioc->cbfn->disable_cbfn(ioc->bfa); | ||
212 | break; | 498 | break; |
213 | 499 | ||
214 | case IOC_E_FWREADY: | 500 | case IOC_E_DETACH: |
501 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | ||
502 | bfa_iocpf_stop(ioc); | ||
215 | break; | 503 | break; |
216 | 504 | ||
217 | default: | 505 | default: |
@@ -219,48 +507,138 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
219 | } | 507 | } |
220 | } | 508 | } |
221 | 509 | ||
510 | |||
511 | static void | ||
512 | bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) | ||
513 | { | ||
514 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | ||
515 | } | ||
516 | |||
222 | /** | 517 | /** |
223 | * Notify enable completion callback and generate mismatch AEN. | 518 | * Hardware initialization failed. |
224 | */ | 519 | */ |
225 | static void | 520 | static void |
226 | bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc) | 521 | bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) |
522 | { | ||
523 | bfa_trc(ioc, event); | ||
524 | |||
525 | switch (event) { | ||
526 | case IOC_E_ENABLED: | ||
527 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); | ||
528 | break; | ||
529 | |||
530 | case IOC_E_FAILED: | ||
531 | /** | ||
532 | * Initialization failure during iocpf init retry. | ||
533 | */ | ||
534 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | ||
535 | break; | ||
536 | |||
537 | case IOC_E_DISABLE: | ||
538 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | ||
539 | break; | ||
540 | |||
541 | case IOC_E_DETACH: | ||
542 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | ||
543 | bfa_iocpf_stop(ioc); | ||
544 | break; | ||
545 | |||
546 | default: | ||
547 | bfa_sm_fault(ioc, event); | ||
548 | } | ||
549 | } | ||
550 | |||
551 | |||
552 | static void | ||
553 | bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc) | ||
227 | { | 554 | { |
555 | struct list_head *qe; | ||
556 | struct bfa_ioc_hbfail_notify_s *notify; | ||
557 | struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; | ||
558 | |||
228 | /** | 559 | /** |
229 | * Provide enable completion callback and AEN notification only once. | 560 | * Notify driver and common modules registered for notification. |
230 | */ | 561 | */ |
231 | if (ioc->retry_count == 0) { | 562 | ioc->cbfn->hbfail_cbfn(ioc->bfa); |
232 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | 563 | list_for_each(qe, &ioc->hb_notify_q) { |
233 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH); | 564 | notify = (struct bfa_ioc_hbfail_notify_s *) qe; |
565 | notify->cbfn(notify->cbarg); | ||
234 | } | 566 | } |
235 | ioc->retry_count++; | 567 | |
236 | bfa_ioc_timer_start(ioc); | 568 | BFA_LOG(KERN_CRIT, bfad, log_level, |
569 | "Heart Beat of IOC has failed\n"); | ||
237 | } | 570 | } |
238 | 571 | ||
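The hb_notify_q walked in bfa_ioc_sm_fail_entry() is filled in by other modules at attach time. A hedged sketch of the registration side — the init/register helper names and the mymod_* symbols are assumptions, not taken from this patch:

	struct bfa_ioc_hbfail_notify_s notify;

	/* record callback and argument, then hook the entry onto hb_notify_q */
	bfa_ioc_hbfail_init(&notify, mymod_iocdisable_cb, mymod);
	bfa_ioc_hbfail_register(ioc, &notify);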
239 | /** | 572 | /** |
240 | * Awaiting firmware version match. | 573 | * IOC failure. |
241 | */ | 574 | */ |
242 | static void | 575 | static void |
243 | bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event) | 576 | bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) |
244 | { | 577 | { |
245 | bfa_trc(ioc, event); | 578 | bfa_trc(ioc, event); |
246 | 579 | ||
247 | switch (event) { | 580 | switch (event) { |
248 | case IOC_E_TIMEOUT: | 581 | |
249 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); | 582 | case IOC_E_FAILED: |
583 | /** | ||
584 | * Initialization failure during iocpf recovery. | ||
585 | * !!! Fall through !!! | ||
586 | */ | ||
587 | case IOC_E_ENABLE: | ||
588 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | ||
589 | break; | ||
590 | |||
591 | case IOC_E_ENABLED: | ||
592 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); | ||
250 | break; | 593 | break; |
251 | 594 | ||
252 | case IOC_E_DISABLE: | 595 | case IOC_E_DISABLE: |
253 | bfa_ioc_disable_comp(ioc); | 596 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); |
597 | break; | ||
598 | |||
599 | case IOC_E_HWERROR: | ||
254 | /* | 600 | /* |
255 | * fall through | 601 | * HB failure notification, ignore. |
256 | */ | 602 | */ |
603 | break; | ||
604 | default: | ||
605 | bfa_sm_fault(ioc, event); | ||
606 | } | ||
607 | } | ||
257 | 608 | ||
258 | case IOC_E_DETACH: | 609 | |
259 | bfa_ioc_timer_stop(ioc); | 610 | |
260 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 611 | /** |
612 | * IOCPF State Machine | ||
613 | */ | ||
614 | |||
615 | |||
616 | /** | ||
617 | * Reset entry actions -- initialize state machine | ||
618 | */ | ||
619 | static void | ||
620 | bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf) | ||
621 | { | ||
622 | iocpf->retry_count = 0; | ||
623 | iocpf->auto_recover = bfa_auto_recover; | ||
624 | } | ||
625 | |||
626 | /** | ||
627 | * Beginning state. IOC is in reset state. | ||
628 | */ | ||
629 | static void | ||
630 | bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | ||
631 | { | ||
632 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
633 | |||
634 | bfa_trc(ioc, event); | ||
635 | |||
636 | switch (event) { | ||
637 | case IOCPF_E_ENABLE: | ||
638 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); | ||
261 | break; | 639 | break; |
262 | 640 | ||
263 | case IOC_E_FWREADY: | 641 | case IOCPF_E_STOP: |
264 | break; | 642 | break; |
265 | 643 | ||
266 | default: | 644 | default: |
@@ -269,31 +647,44 @@ bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
269 | } | 647 | } |
270 | 648 | ||
271 | /** | 649 | /** |
272 | * Request for semaphore. | 650 | * Semaphore should be acquired for version check. |
273 | */ | 651 | */ |
274 | static void | 652 | static void |
275 | bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc) | 653 | bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) |
276 | { | 654 | { |
277 | bfa_ioc_hw_sem_get(ioc); | 655 | bfa_ioc_hw_sem_get(iocpf->ioc); |
278 | } | 656 | } |
279 | 657 | ||
280 | /** | 658 | /** |
281 | * Awaiting semaphore for h/w initialzation. | 659 | * Awaiting h/w semaphore to continue with version check. |
282 | */ | 660 | */ |
283 | static void | 661 | static void |
284 | bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event) | 662 | bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event) |
285 | { | 663 | { |
664 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
665 | |||
286 | bfa_trc(ioc, event); | 666 | bfa_trc(ioc, event); |
287 | 667 | ||
288 | switch (event) { | 668 | switch (event) { |
289 | case IOC_E_SEMLOCKED: | 669 | case IOCPF_E_SEMLOCKED: |
290 | ioc->retry_count = 0; | 670 | if (bfa_ioc_firmware_lock(ioc)) { |
291 | bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); | 671 | iocpf->retry_count = 0; |
672 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
673 | } else { | ||
674 | bfa_ioc_hw_sem_release(ioc); | ||
675 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); | ||
676 | } | ||
292 | break; | 677 | break; |
293 | 678 | ||
294 | case IOC_E_DISABLE: | 679 | case IOCPF_E_DISABLE: |
295 | bfa_ioc_hw_sem_get_cancel(ioc); | 680 | bfa_ioc_hw_sem_get_cancel(ioc); |
296 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 681 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
682 | bfa_ioc_pf_disabled(ioc); | ||
683 | break; | ||
684 | |||
685 | case IOCPF_E_STOP: | ||
686 | bfa_ioc_hw_sem_get_cancel(ioc); | ||
687 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | ||
297 | break; | 688 | break; |
298 | 689 | ||
299 | default: | 690 | default: |
@@ -301,51 +692,81 @@ bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
301 | } | 692 | } |
302 | } | 693 | } |
303 | 694 | ||
304 | 695 | /** | |
696 | * Notify enable completion callback. | ||
697 | */ | ||
305 | static void | 698 | static void |
306 | bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc) | 699 | bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf) |
307 | { | 700 | { |
308 | bfa_ioc_timer_start(ioc); | 701 | /* |
309 | bfa_ioc_reset(ioc, BFA_FALSE); | 702 | * Call only the first time sm enters fwmismatch state. |
703 | */ | ||
704 | if (iocpf->retry_count == 0) | ||
705 | bfa_ioc_pf_fwmismatch(iocpf->ioc); | ||
706 | |||
707 | iocpf->retry_count++; | ||
708 | bfa_iocpf_timer_start(iocpf->ioc); | ||
310 | } | 709 | } |
311 | 710 | ||
312 | /** | 711 | /** |
313 | * Hardware is being initialized. Interrupts are enabled. | 712 | * Awaiting firmware version match. |
314 | * Holding hardware semaphore lock. | ||
315 | */ | 713 | */ |
316 | static void | 714 | static void |
317 | bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event) | 715 | bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event) |
318 | { | 716 | { |
717 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
718 | |||
319 | bfa_trc(ioc, event); | 719 | bfa_trc(ioc, event); |
320 | 720 | ||
321 | switch (event) { | 721 | switch (event) { |
322 | case IOC_E_FWREADY: | 722 | case IOCPF_E_TIMEOUT: |
323 | bfa_ioc_timer_stop(ioc); | 723 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); |
324 | bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); | ||
325 | break; | 724 | break; |
326 | 725 | ||
327 | case IOC_E_HWERROR: | 726 | case IOCPF_E_DISABLE: |
328 | bfa_ioc_timer_stop(ioc); | 727 | bfa_iocpf_timer_stop(ioc); |
329 | /* | 728 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
330 | * fall through | 729 | bfa_ioc_pf_disabled(ioc); |
331 | */ | 730 | break; |
332 | 731 | ||
333 | case IOC_E_TIMEOUT: | 732 | case IOCPF_E_STOP: |
334 | ioc->retry_count++; | 733 | bfa_iocpf_timer_stop(ioc); |
335 | if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { | 734 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
336 | bfa_ioc_timer_start(ioc); | 735 | break; |
337 | bfa_ioc_reset(ioc, BFA_TRUE); | ||
338 | break; | ||
339 | } | ||
340 | 736 | ||
341 | bfa_ioc_hw_sem_release(ioc); | 737 | default: |
342 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | 738 | bfa_sm_fault(ioc, event); |
739 | } | ||
740 | } | ||
741 | |||
742 | /** | ||
743 | * Request for semaphore. | ||
744 | */ | ||
745 | static void | ||
746 | bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf) | ||
747 | { | ||
748 | bfa_ioc_hw_sem_get(iocpf->ioc); | ||
749 | } | ||
750 | |||
751 | /** | ||
752 | * Awaiting semaphore for h/w initialization. | ||
753 | */ | ||
754 | static void | ||
755 | bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | ||
756 | { | ||
757 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
758 | |||
759 | bfa_trc(ioc, event); | ||
760 | |||
761 | switch (event) { | ||
762 | case IOCPF_E_SEMLOCKED: | ||
763 | iocpf->retry_count = 0; | ||
764 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
343 | break; | 765 | break; |
344 | 766 | ||
345 | case IOC_E_DISABLE: | 767 | case IOCPF_E_DISABLE: |
346 | bfa_ioc_hw_sem_release(ioc); | 768 | bfa_ioc_hw_sem_get_cancel(ioc); |
347 | bfa_ioc_timer_stop(ioc); | 769 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); |
348 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | ||
349 | break; | 770 | break; |
350 | 771 | ||
351 | default: | 772 | default: |
@@ -355,55 +776,54 @@ bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
355 | 776 | ||
356 | 777 | ||
357 | static void | 778 | static void |
358 | bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) | 779 | bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) |
359 | { | 780 | { |
360 | bfa_ioc_timer_start(ioc); | 781 | bfa_iocpf_timer_start(iocpf->ioc); |
361 | bfa_ioc_send_enable(ioc); | 782 | bfa_ioc_reset(iocpf->ioc, BFA_FALSE); |
362 | } | 783 | } |
363 | 784 | ||
364 | /** | 785 | /** |
365 | * Host IOC function is being enabled, awaiting response from firmware. | 786 | * Hardware is being initialized. Interrupts are enabled. |
366 | * Semaphore is acquired. | 787 | * Holding hardware semaphore lock. |
367 | */ | 788 | */ |
368 | static void | 789 | static void |
369 | bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) | 790 | bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event) |
370 | { | 791 | { |
792 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
793 | |||
371 | bfa_trc(ioc, event); | 794 | bfa_trc(ioc, event); |
372 | 795 | ||
373 | switch (event) { | 796 | switch (event) { |
374 | case IOC_E_FWRSP_ENABLE: | 797 | case IOCPF_E_FWREADY: |
375 | bfa_ioc_timer_stop(ioc); | 798 | bfa_iocpf_timer_stop(ioc); |
376 | bfa_ioc_hw_sem_release(ioc); | 799 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); |
377 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); | ||
378 | break; | 800 | break; |
379 | 801 | ||
380 | case IOC_E_HWERROR: | 802 | case IOCPF_E_INITFAIL: |
381 | bfa_ioc_timer_stop(ioc); | 803 | bfa_iocpf_timer_stop(ioc); |
382 | /* | 804 | /* |
383 | * fall through | 805 | * !!! fall through !!! |
384 | */ | 806 | */ |
385 | 807 | ||
386 | case IOC_E_TIMEOUT: | 808 | case IOCPF_E_TIMEOUT: |
387 | ioc->retry_count++; | 809 | iocpf->retry_count++; |
388 | if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { | 810 | if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) { |
389 | bfa_reg_write(ioc->ioc_regs.ioc_fwstate, | 811 | bfa_iocpf_timer_start(ioc); |
390 | BFI_IOC_UNINIT); | 812 | bfa_ioc_reset(ioc, BFA_TRUE); |
391 | bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); | ||
392 | break; | 813 | break; |
393 | } | 814 | } |
394 | 815 | ||
395 | bfa_ioc_hw_sem_release(ioc); | 816 | bfa_ioc_hw_sem_release(ioc); |
396 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | 817 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); |
397 | break; | ||
398 | 818 | ||
399 | case IOC_E_DISABLE: | 819 | if (event == IOCPF_E_TIMEOUT) |
400 | bfa_ioc_timer_stop(ioc); | 820 | bfa_ioc_pf_failed(ioc); |
401 | bfa_ioc_hw_sem_release(ioc); | ||
402 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | ||
403 | break; | 821 | break; |
404 | 822 | ||
405 | case IOC_E_FWREADY: | 823 | case IOCPF_E_DISABLE: |
406 | bfa_ioc_send_enable(ioc); | 824 | bfa_ioc_hw_sem_release(ioc); |
825 | bfa_iocpf_timer_stop(ioc); | ||
826 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | ||
407 | break; | 827 | break; |
408 | 828 | ||
409 | default: | 829 | default: |
@@ -413,40 +833,60 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
413 | 833 | ||
414 | 834 | ||
415 | static void | 835 | static void |
416 | bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) | 836 | bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) |
417 | { | 837 | { |
418 | bfa_ioc_timer_start(ioc); | 838 | bfa_iocpf_timer_start(iocpf->ioc); |
419 | bfa_ioc_send_getattr(ioc); | 839 | bfa_ioc_send_enable(iocpf->ioc); |
420 | } | 840 | } |
421 | 841 | ||
422 | /** | 842 | /** |
423 | * IOC configuration in progress. Timer is active. | 843 | * Host IOC function is being enabled, awaiting response from firmware. |
844 | * Semaphore is acquired. | ||
424 | */ | 845 | */ |
425 | static void | 846 | static void |
426 | bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) | 847 | bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) |
427 | { | 848 | { |
849 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
850 | |||
428 | bfa_trc(ioc, event); | 851 | bfa_trc(ioc, event); |
429 | 852 | ||
430 | switch (event) { | 853 | switch (event) { |
431 | case IOC_E_FWRSP_GETATTR: | 854 | case IOCPF_E_FWRSP_ENABLE: |
432 | bfa_ioc_timer_stop(ioc); | 855 | bfa_iocpf_timer_stop(ioc); |
433 | bfa_ioc_check_attr_wwns(ioc); | 856 | bfa_ioc_hw_sem_release(ioc); |
434 | bfa_fsm_set_state(ioc, bfa_ioc_sm_op); | 857 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); |
435 | break; | 858 | break; |
436 | 859 | ||
437 | case IOC_E_HWERROR: | 860 | case IOCPF_E_INITFAIL: |
438 | bfa_ioc_timer_stop(ioc); | 861 | bfa_iocpf_timer_stop(ioc); |
439 | /* | 862 | /* |
440 | * fall through | 863 | * !!! fall through !!! |
441 | */ | 864 | */ |
442 | 865 | ||
443 | case IOC_E_TIMEOUT: | 866 | case IOCPF_E_TIMEOUT: |
444 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | 867 | iocpf->retry_count++; |
868 | if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) { | ||
869 | bfa_reg_write(ioc->ioc_regs.ioc_fwstate, | ||
870 | BFI_IOC_UNINIT); | ||
871 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
872 | break; | ||
873 | } | ||
874 | |||
875 | bfa_ioc_hw_sem_release(ioc); | ||
876 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); | ||
877 | |||
878 | if (event == IOCPF_E_TIMEOUT) | ||
879 | bfa_ioc_pf_failed(ioc); | ||
445 | break; | 880 | break; |
446 | 881 | ||
447 | case IOC_E_DISABLE: | 882 | case IOCPF_E_DISABLE: |
448 | bfa_ioc_timer_stop(ioc); | 883 | bfa_iocpf_timer_stop(ioc); |
449 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 884 | bfa_ioc_hw_sem_release(ioc); |
885 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); | ||
886 | break; | ||
887 | |||
888 | case IOCPF_E_FWREADY: | ||
889 | bfa_ioc_send_enable(ioc); | ||
450 | break; | 890 | break; |
451 | 891 | ||
452 | default: | 892 | default: |
@@ -455,41 +895,40 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
455 | } | 895 | } |
456 | 896 | ||
457 | 897 | ||
898 | |||
458 | static void | 899 | static void |
459 | bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) | 900 | bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf) |
460 | { | 901 | { |
461 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); | 902 | bfa_ioc_pf_enabled(iocpf->ioc); |
462 | bfa_ioc_hb_monitor(ioc); | ||
463 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); | ||
464 | } | 903 | } |
465 | 904 | ||
466 | static void | 905 | static void |
467 | bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) | 906 | bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event) |
468 | { | 907 | { |
908 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
909 | |||
469 | bfa_trc(ioc, event); | 910 | bfa_trc(ioc, event); |
470 | 911 | ||
471 | switch (event) { | 912 | switch (event) { |
472 | case IOC_E_ENABLE: | 913 | case IOCPF_E_DISABLE: |
914 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); | ||
473 | break; | 915 | break; |
474 | 916 | ||
475 | case IOC_E_DISABLE: | 917 | case IOCPF_E_GETATTRFAIL: |
476 | bfa_ioc_hb_stop(ioc); | 918 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); |
477 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | ||
478 | break; | 919 | break; |
479 | 920 | ||
480 | case IOC_E_HWERROR: | 921 | case IOCPF_E_FAIL: |
481 | case IOC_E_FWREADY: | 922 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); |
482 | /** | 923 | break; |
483 | * Hard error or IOC recovery by other function. | ||
484 | * Treat it same as heartbeat failure. | ||
485 | */ | ||
486 | bfa_ioc_hb_stop(ioc); | ||
487 | /* | ||
488 | * !!! fall through !!! | ||
489 | */ | ||
490 | 924 | ||
491 | case IOC_E_HBFAIL: | 925 | case IOCPF_E_FWREADY: |
492 | bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); | 926 | if (bfa_ioc_is_operational(ioc)) |
927 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); | ||
928 | else | ||
929 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); | ||
930 | |||
931 | bfa_ioc_pf_failed(ioc); | ||
493 | break; | 932 | break; |
494 | 933 | ||
495 | default: | 934 | default: |
@@ -499,36 +938,41 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
499 | 938 | ||
500 | 939 | ||
501 | static void | 940 | static void |
502 | bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) | 941 | bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf) |
503 | { | 942 | { |
504 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE); | 943 | bfa_iocpf_timer_start(iocpf->ioc); |
505 | bfa_ioc_timer_start(ioc); | 944 | bfa_ioc_send_disable(iocpf->ioc); |
506 | bfa_ioc_send_disable(ioc); | ||
507 | } | 945 | } |
508 | 946 | ||
509 | /** | 947 | /** |
510 | * IOC is being disabled | 948 | * IOC is being disabled |
511 | */ | 949 | */ |
512 | static void | 950 | static void |
513 | bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) | 951 | bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) |
514 | { | 952 | { |
953 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
954 | |||
515 | bfa_trc(ioc, event); | 955 | bfa_trc(ioc, event); |
516 | 956 | ||
517 | switch (event) { | 957 | switch (event) { |
518 | case IOC_E_FWRSP_DISABLE: | 958 | case IOCPF_E_FWRSP_DISABLE: |
519 | bfa_ioc_timer_stop(ioc); | 959 | case IOCPF_E_FWREADY: |
520 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 960 | bfa_iocpf_timer_stop(ioc); |
961 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | ||
521 | break; | 962 | break; |
522 | 963 | ||
523 | case IOC_E_HWERROR: | 964 | case IOCPF_E_FAIL: |
524 | bfa_ioc_timer_stop(ioc); | 965 | bfa_iocpf_timer_stop(ioc); |
525 | /* | 966 | /* |
526 | * !!! fall through !!! | 967 | * !!! fall through !!! |
527 | */ | 968 | */ |
528 | 969 | ||
529 | case IOC_E_TIMEOUT: | 970 | case IOCPF_E_TIMEOUT: |
530 | bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); | 971 | bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); |
531 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 972 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); |
973 | break; | ||
974 | |||
975 | case IOCPF_E_FWRSP_ENABLE: | ||
532 | break; | 976 | break; |
533 | 977 | ||
534 | default: | 978 | default: |
@@ -540,31 +984,26 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
540 | * IOC disable completion entry. | 984 | * IOC disable completion entry. |
541 | */ | 985 | */ |
542 | static void | 986 | static void |
543 | bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) | 987 | bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf) |
544 | { | 988 | { |
545 | bfa_ioc_disable_comp(ioc); | 989 | bfa_ioc_pf_disabled(iocpf->ioc); |
546 | } | 990 | } |
547 | 991 | ||
548 | static void | 992 | static void |
549 | bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) | 993 | bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event) |
550 | { | 994 | { |
995 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
996 | |||
551 | bfa_trc(ioc, event); | 997 | bfa_trc(ioc, event); |
552 | 998 | ||
553 | switch (event) { | 999 | switch (event) { |
554 | case IOC_E_ENABLE: | 1000 | case IOCPF_E_ENABLE: |
555 | bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); | 1001 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); |
556 | break; | 1002 | break; |
557 | 1003 | ||
558 | case IOC_E_DISABLE: | 1004 | case IOCPF_E_STOP: |
559 | ioc->cbfn->disable_cbfn(ioc->bfa); | ||
560 | break; | ||
561 | |||
562 | case IOC_E_FWREADY: | ||
563 | break; | ||
564 | |||
565 | case IOC_E_DETACH: | ||
566 | bfa_ioc_firmware_unlock(ioc); | 1005 | bfa_ioc_firmware_unlock(ioc); |
567 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 1006 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
568 | break; | 1007 | break; |
569 | 1008 | ||
570 | default: | 1009 | default: |
@@ -574,34 +1013,35 @@ bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
574 | 1013 | ||
575 | 1014 | ||
576 | static void | 1015 | static void |
577 | bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) | 1016 | bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) |
578 | { | 1017 | { |
579 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | 1018 | bfa_iocpf_timer_start(iocpf->ioc); |
580 | bfa_ioc_timer_start(ioc); | ||
581 | } | 1019 | } |
582 | 1020 | ||
583 | /** | 1021 | /** |
584 | * Hardware initialization failed. | 1022 | * Hardware initialization failed. |
585 | */ | 1023 | */ |
586 | static void | 1024 | static void |
587 | bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) | 1025 | bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) |
588 | { | 1026 | { |
1027 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
1028 | |||
589 | bfa_trc(ioc, event); | 1029 | bfa_trc(ioc, event); |
590 | 1030 | ||
591 | switch (event) { | 1031 | switch (event) { |
592 | case IOC_E_DISABLE: | 1032 | case IOCPF_E_DISABLE: |
593 | bfa_ioc_timer_stop(ioc); | 1033 | bfa_iocpf_timer_stop(ioc); |
594 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 1034 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); |
595 | break; | 1035 | break; |
596 | 1036 | ||
597 | case IOC_E_DETACH: | 1037 | case IOCPF_E_STOP: |
598 | bfa_ioc_timer_stop(ioc); | 1038 | bfa_iocpf_timer_stop(ioc); |
599 | bfa_ioc_firmware_unlock(ioc); | 1039 | bfa_ioc_firmware_unlock(ioc); |
600 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 1040 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
601 | break; | 1041 | break; |
602 | 1042 | ||
603 | case IOC_E_TIMEOUT: | 1043 | case IOCPF_E_TIMEOUT: |
604 | bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); | 1044 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); |
605 | break; | 1045 | break; |
606 | 1046 | ||
607 | default: | 1047 | default: |
@@ -611,80 +1051,47 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
611 | 1051 | ||
612 | 1052 | ||
613 | static void | 1053 | static void |
614 | bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc) | 1054 | bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) |
615 | { | 1055 | { |
616 | struct list_head *qe; | ||
617 | struct bfa_ioc_hbfail_notify_s *notify; | ||
618 | |||
619 | /** | 1056 | /** |
620 | * Mark IOC as failed in hardware and stop firmware. | 1057 | * Mark IOC as failed in hardware and stop firmware. |
621 | */ | 1058 | */ |
622 | bfa_ioc_lpu_stop(ioc); | 1059 | bfa_ioc_lpu_stop(iocpf->ioc); |
623 | bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); | 1060 | bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); |
624 | 1061 | ||
625 | /** | 1062 | /** |
626 | * Notify other functions on HB failure. | 1063 | * Notify other functions on HB failure. |
627 | */ | 1064 | */ |
628 | bfa_ioc_notify_hbfail(ioc); | 1065 | bfa_ioc_notify_hbfail(iocpf->ioc); |
629 | |||
630 | /** | ||
631 | * Notify driver and common modules registered for notification. | ||
632 | */ | ||
633 | ioc->cbfn->hbfail_cbfn(ioc->bfa); | ||
634 | list_for_each(qe, &ioc->hb_notify_q) { | ||
635 | notify = (struct bfa_ioc_hbfail_notify_s *)qe; | ||
636 | notify->cbfn(notify->cbarg); | ||
637 | } | ||
638 | 1066 | ||
639 | /** | 1067 | /** |
640 | * Flush any queued up mailbox requests. | 1068 | * Flush any queued up mailbox requests. |
641 | */ | 1069 | */ |
642 | bfa_ioc_mbox_hbfail(ioc); | 1070 | bfa_ioc_mbox_hbfail(iocpf->ioc); |
643 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL); | ||
644 | 1071 | ||
645 | /** | 1072 | if (iocpf->auto_recover) |
646 | * Trigger auto-recovery after a delay. | 1073 | bfa_iocpf_recovery_timer_start(iocpf->ioc); |
647 | */ | ||
648 | if (ioc->auto_recover) { | ||
649 | bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, | ||
650 | bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER); | ||
651 | } | ||
652 | } | 1074 | } |
653 | 1075 | ||
654 | /** | 1076 | /** |
655 | * IOC heartbeat failure. | 1077 | * IOC is in failed state. |
656 | */ | 1078 | */ |
657 | static void | 1079 | static void |
658 | bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) | 1080 | bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) |
659 | { | 1081 | { |
1082 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
1083 | |||
660 | bfa_trc(ioc, event); | 1084 | bfa_trc(ioc, event); |
661 | 1085 | ||
662 | switch (event) { | 1086 | switch (event) { |
663 | 1087 | case IOCPF_E_DISABLE: | |
664 | case IOC_E_ENABLE: | 1088 | if (iocpf->auto_recover) |
665 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | 1089 | bfa_iocpf_timer_stop(ioc); |
666 | break; | 1090 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); |
667 | |||
668 | case IOC_E_DISABLE: | ||
669 | if (ioc->auto_recover) | ||
670 | bfa_ioc_timer_stop(ioc); | ||
671 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | ||
672 | break; | 1091 | break; |
673 | 1092 | ||
674 | case IOC_E_TIMEOUT: | 1093 | case IOCPF_E_TIMEOUT: |
675 | bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); | 1094 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); |
676 | break; | ||
677 | |||
678 | case IOC_E_FWREADY: | ||
679 | /** | ||
680 | * Recovery is already initiated by other function. | ||
681 | */ | ||
682 | break; | ||
683 | |||
684 | case IOC_E_HWERROR: | ||
685 | /* | ||
686 | * HB failure notification, ignore. | ||
687 | */ | ||
688 | break; | 1095 | break; |
689 | 1096 | ||
690 | default: | 1097 | default: |
@@ -695,14 +1102,14 @@ bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
695 | 1102 | ||
696 | 1103 | ||
697 | /** | 1104 | /** |
698 | * bfa_ioc_pvt BFA IOC private functions | 1105 | * hal_ioc_pvt BFA IOC private functions |
699 | */ | 1106 | */ |
700 | 1107 | ||
701 | static void | 1108 | static void |
702 | bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) | 1109 | bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) |
703 | { | 1110 | { |
704 | struct list_head *qe; | 1111 | struct list_head *qe; |
705 | struct bfa_ioc_hbfail_notify_s *notify; | 1112 | struct bfa_ioc_hbfail_notify_s *notify; |
706 | 1113 | ||
707 | ioc->cbfn->disable_cbfn(ioc->bfa); | 1114 | ioc->cbfn->disable_cbfn(ioc->bfa); |
708 | 1115 | ||
@@ -710,25 +1117,17 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) | |||
710 | * Notify common modules registered for notification. | 1117 | * Notify common modules registered for notification. |
711 | */ | 1118 | */ |
712 | list_for_each(qe, &ioc->hb_notify_q) { | 1119 | list_for_each(qe, &ioc->hb_notify_q) { |
713 | notify = (struct bfa_ioc_hbfail_notify_s *)qe; | 1120 | notify = (struct bfa_ioc_hbfail_notify_s *) qe; |
714 | notify->cbfn(notify->cbarg); | 1121 | notify->cbfn(notify->cbarg); |
715 | } | 1122 | } |
716 | } | 1123 | } |
717 | 1124 | ||
718 | void | ||
719 | bfa_ioc_sem_timeout(void *ioc_arg) | ||
720 | { | ||
721 | struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; | ||
722 | |||
723 | bfa_ioc_hw_sem_get(ioc); | ||
724 | } | ||
725 | |||
726 | bfa_boolean_t | 1125 | bfa_boolean_t |
727 | bfa_ioc_sem_get(bfa_os_addr_t sem_reg) | 1126 | bfa_ioc_sem_get(bfa_os_addr_t sem_reg) |
728 | { | 1127 | { |
729 | u32 r32; | 1128 | u32 r32; |
730 | int cnt = 0; | 1129 | int cnt = 0; |
731 | #define BFA_SEM_SPINCNT 3000 | 1130 | #define BFA_SEM_SPINCNT 3000 |
732 | 1131 | ||
733 | r32 = bfa_reg_read(sem_reg); | 1132 | r32 = bfa_reg_read(sem_reg); |
734 | 1133 | ||
@@ -754,7 +1153,7 @@ bfa_ioc_sem_release(bfa_os_addr_t sem_reg) | |||
754 | static void | 1153 | static void |
755 | bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) | 1154 | bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) |
756 | { | 1155 | { |
757 | u32 r32; | 1156 | u32 r32; |
758 | 1157 | ||
759 | /** | 1158 | /** |
760 | * First read to the semaphore register will return 0, subsequent reads | 1159 | * First read to the semaphore register will return 0, subsequent reads |
@@ -762,12 +1161,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) | |||
762 | */ | 1161 | */ |
763 | r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); | 1162 | r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); |
764 | if (r32 == 0) { | 1163 | if (r32 == 0) { |
765 | bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); | 1164 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); |
766 | return; | 1165 | return; |
767 | } | 1166 | } |
768 | 1167 | ||
769 | bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, | 1168 | bfa_sem_timer_start(ioc); |
770 | ioc, BFA_IOC_HWSEM_TOV); | ||
771 | } | 1169 | } |
772 | 1170 | ||
773 | void | 1171 | void |
@@ -779,7 +1177,7 @@ bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) | |||
779 | static void | 1177 | static void |
780 | bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) | 1178 | bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) |
781 | { | 1179 | { |
782 | bfa_timer_stop(&ioc->sem_timer); | 1180 | bfa_sem_timer_stop(ioc); |
783 | } | 1181 | } |
784 | 1182 | ||
785 | /** | 1183 | /** |
@@ -788,14 +1186,18 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) | |||
788 | static void | 1186 | static void |
789 | bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) | 1187 | bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) |
790 | { | 1188 | { |
791 | u32 pss_ctl; | 1189 | u32 pss_ctl; |
792 | int i; | 1190 | int i; |
793 | #define PSS_LMEM_INIT_TIME 10000 | 1191 | #define PSS_LMEM_INIT_TIME 10000 |
794 | 1192 | ||
795 | pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); | 1193 | pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); |
796 | pss_ctl &= ~__PSS_LMEM_RESET; | 1194 | pss_ctl &= ~__PSS_LMEM_RESET; |
797 | pss_ctl |= __PSS_LMEM_INIT_EN; | 1195 | pss_ctl |= __PSS_LMEM_INIT_EN; |
798 | pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */ | 1196 | |
1197 | /* | ||
1198 | * i2c workaround 12.5khz clock | ||
1199 | */ | ||
1200 | pss_ctl |= __PSS_I2C_CLK_DIV(3UL); | ||
799 | bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); | 1201 | bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); |
800 | 1202 | ||
801 | /** | 1203 | /** |
@@ -821,7 +1223,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) | |||
821 | static void | 1223 | static void |
822 | bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) | 1224 | bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) |
823 | { | 1225 | { |
824 | u32 pss_ctl; | 1226 | u32 pss_ctl; |
825 | 1227 | ||
826 | /** | 1228 | /** |
827 | * Take processor out of reset. | 1229 | * Take processor out of reset. |
@@ -835,7 +1237,7 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) | |||
835 | static void | 1237 | static void |
836 | bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) | 1238 | bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) |
837 | { | 1239 | { |
838 | u32 pss_ctl; | 1240 | u32 pss_ctl; |
839 | 1241 | ||
840 | /** | 1242 | /** |
841 | * Put processors in reset. | 1243 | * Put processors in reset. |
@@ -852,10 +1254,10 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) | |||
852 | void | 1254 | void |
853 | bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) | 1255 | bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) |
854 | { | 1256 | { |
855 | u32 pgnum, pgoff; | 1257 | u32 pgnum, pgoff; |
856 | u32 loff = 0; | 1258 | u32 loff = 0; |
857 | int i; | 1259 | int i; |
858 | u32 *fwsig = (u32 *) fwhdr; | 1260 | u32 *fwsig = (u32 *) fwhdr; |
859 | 1261 | ||
860 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | 1262 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); |
861 | pgoff = bfa_ioc_smem_pgoff(ioc, loff); | 1263 | pgoff = bfa_ioc_smem_pgoff(ioc, loff); |
@@ -863,7 +1265,8 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) | |||
863 | 1265 | ||
864 | for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); | 1266 | for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); |
865 | i++) { | 1267 | i++) { |
866 | fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); | 1268 | fwsig[i] = |
1269 | bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); | ||
867 | loff += sizeof(u32); | 1270 | loff += sizeof(u32); |
868 | } | 1271 | } |
869 | } | 1272 | } |
@@ -875,10 +1278,10 @@ bfa_boolean_t | |||
875 | bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) | 1278 | bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) |
876 | { | 1279 | { |
877 | struct bfi_ioc_image_hdr_s *drv_fwhdr; | 1280 | struct bfi_ioc_image_hdr_s *drv_fwhdr; |
878 | int i; | 1281 | int i; |
879 | 1282 | ||
880 | drv_fwhdr = (struct bfi_ioc_image_hdr_s *) | 1283 | drv_fwhdr = (struct bfi_ioc_image_hdr_s *) |
881 | bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); | 1284 | bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); |
882 | 1285 | ||
883 | for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { | 1286 | for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { |
884 | if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { | 1287 | if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { |
@@ -897,21 +1300,20 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) | |||
897 | * Return true if current running version is valid. Firmware signature and | 1300 | * Return true if current running version is valid. Firmware signature and |
898 | * execution context (driver/bios) must match. | 1301 | * execution context (driver/bios) must match. |
899 | */ | 1302 | */ |
900 | static bfa_boolean_t | 1303 | static bfa_boolean_t |
901 | bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) | 1304 | bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) |
902 | { | 1305 | { |
903 | struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; | 1306 | struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; |
904 | 1307 | ||
905 | /** | 1308 | /** |
906 | * If bios/efi boot (flash based) -- return true | 1309 | * If bios/efi boot (flash based) -- return true |
907 | */ | 1310 | */ |
908 | if (bfa_ioc_is_optrom(ioc)) | 1311 | if (bfa_ioc_is_bios_optrom(ioc)) |
909 | return BFA_TRUE; | 1312 | return BFA_TRUE; |
910 | 1313 | ||
911 | bfa_ioc_fwver_get(ioc, &fwhdr); | 1314 | bfa_ioc_fwver_get(ioc, &fwhdr); |
912 | drv_fwhdr = (struct bfi_ioc_image_hdr_s *) | 1315 | drv_fwhdr = (struct bfi_ioc_image_hdr_s *) |
913 | bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); | 1316 | bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); |
914 | |||
915 | 1317 | ||
916 | if (fwhdr.signature != drv_fwhdr->signature) { | 1318 | if (fwhdr.signature != drv_fwhdr->signature) { |
917 | bfa_trc(ioc, fwhdr.signature); | 1319 | bfa_trc(ioc, fwhdr.signature); |
@@ -919,9 +1321,9 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) | |||
919 | return BFA_FALSE; | 1321 | return BFA_FALSE; |
920 | } | 1322 | } |
921 | 1323 | ||
922 | if (fwhdr.exec != drv_fwhdr->exec) { | 1324 | if (bfa_os_swap32(fwhdr.param) != boot_env) { |
923 | bfa_trc(ioc, fwhdr.exec); | 1325 | bfa_trc(ioc, fwhdr.param); |
924 | bfa_trc(ioc, drv_fwhdr->exec); | 1326 | bfa_trc(ioc, boot_env); |
925 | return BFA_FALSE; | 1327 | return BFA_FALSE; |
926 | } | 1328 | } |
927 | 1329 | ||
@@ -934,7 +1336,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) | |||
934 | static void | 1336 | static void |
935 | bfa_ioc_msgflush(struct bfa_ioc_s *ioc) | 1337 | bfa_ioc_msgflush(struct bfa_ioc_s *ioc) |
936 | { | 1338 | { |
937 | u32 r32; | 1339 | u32 r32; |
938 | 1340 | ||
939 | r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); | 1341 | r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); |
940 | if (r32) | 1342 | if (r32) |
@@ -946,7 +1348,9 @@ static void | |||
946 | bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) | 1348 | bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) |
947 | { | 1349 | { |
948 | enum bfi_ioc_state ioc_fwstate; | 1350 | enum bfi_ioc_state ioc_fwstate; |
949 | bfa_boolean_t fwvalid; | 1351 | bfa_boolean_t fwvalid; |
1352 | u32 boot_type; | ||
1353 | u32 boot_env; | ||
950 | 1354 | ||
951 | ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); | 1355 | ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); |
952 | 1356 | ||
@@ -955,14 +1359,33 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) | |||
955 | 1359 | ||
956 | bfa_trc(ioc, ioc_fwstate); | 1360 | bfa_trc(ioc, ioc_fwstate); |
957 | 1361 | ||
1362 | boot_type = BFI_BOOT_TYPE_NORMAL; | ||
1363 | boot_env = BFI_BOOT_LOADER_OS; | ||
1364 | |||
1365 | /** | ||
1366 | * Flash based firmware boot BIOS env. | ||
1367 | */ | ||
1368 | if (bfa_ioc_is_bios_optrom(ioc)) { | ||
1369 | boot_type = BFI_BOOT_TYPE_FLASH; | ||
1370 | boot_env = BFI_BOOT_LOADER_BIOS; | ||
1371 | } | ||
1372 | |||
1373 | /** | ||
1374 | * Flash based firmware boot UEFI env. | ||
1375 | */ | ||
1376 | if (bfa_ioc_is_uefi(ioc)) { | ||
1377 | boot_type = BFI_BOOT_TYPE_FLASH; | ||
1378 | boot_env = BFI_BOOT_LOADER_UEFI; | ||
1379 | } | ||
1380 | |||
958 | /** | 1381 | /** |
959 | * check if firmware is valid | 1382 | * check if firmware is valid |
960 | */ | 1383 | */ |
961 | fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? | 1384 | fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? |
962 | BFA_FALSE : bfa_ioc_fwver_valid(ioc); | 1385 | BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env); |
963 | 1386 | ||
964 | if (!fwvalid) { | 1387 | if (!fwvalid) { |
965 | bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); | 1388 | bfa_ioc_boot(ioc, boot_type, boot_env); |
966 | return; | 1389 | return; |
967 | } | 1390 | } |
968 | 1391 | ||
@@ -971,7 +1394,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) | |||
971 | * just wait for an initialization completion interrupt. | 1394 | * just wait for an initialization completion interrupt. |
972 | */ | 1395 | */ |
973 | if (ioc_fwstate == BFI_IOC_INITING) { | 1396 | if (ioc_fwstate == BFI_IOC_INITING) { |
974 | bfa_trc(ioc, ioc_fwstate); | ||
975 | ioc->cbfn->reset_cbfn(ioc->bfa); | 1397 | ioc->cbfn->reset_cbfn(ioc->bfa); |
976 | return; | 1398 | return; |
977 | } | 1399 | } |
@@ -985,8 +1407,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) | |||
985 | * is loaded. | 1407 | * is loaded. |
986 | */ | 1408 | */ |
987 | if (ioc_fwstate == BFI_IOC_DISABLED || | 1409 | if (ioc_fwstate == BFI_IOC_DISABLED || |
988 | (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { | 1410 | (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { |
989 | bfa_trc(ioc, ioc_fwstate); | ||
990 | 1411 | ||
991 | /** | 1412 | /** |
992 | * When using MSI-X any pending firmware ready event should | 1413 | * When using MSI-X any pending firmware ready event should |
@@ -994,20 +1415,20 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) | |||
994 | */ | 1415 | */ |
995 | bfa_ioc_msgflush(ioc); | 1416 | bfa_ioc_msgflush(ioc); |
996 | ioc->cbfn->reset_cbfn(ioc->bfa); | 1417 | ioc->cbfn->reset_cbfn(ioc->bfa); |
997 | bfa_fsm_send_event(ioc, IOC_E_FWREADY); | 1418 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); |
998 | return; | 1419 | return; |
999 | } | 1420 | } |
1000 | 1421 | ||
1001 | /** | 1422 | /** |
1002 | * Initialize the h/w for any other states. | 1423 | * Initialize the h/w for any other states. |
1003 | */ | 1424 | */ |
1004 | bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); | 1425 | bfa_ioc_boot(ioc, boot_type, boot_env); |
1005 | } | 1426 | } |
1006 | 1427 | ||
1007 | static void | 1428 | static void |
1008 | bfa_ioc_timeout(void *ioc_arg) | 1429 | bfa_ioc_timeout(void *ioc_arg) |
1009 | { | 1430 | { |
1010 | struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; | 1431 | struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; |
1011 | 1432 | ||
1012 | bfa_trc(ioc, 0); | 1433 | bfa_trc(ioc, 0); |
1013 | bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); | 1434 | bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); |
@@ -1016,8 +1437,8 @@ bfa_ioc_timeout(void *ioc_arg) | |||
1016 | void | 1437 | void |
1017 | bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) | 1438 | bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) |
1018 | { | 1439 | { |
1019 | u32 *msgp = (u32 *) ioc_msg; | 1440 | u32 *msgp = (u32 *) ioc_msg; |
1020 | u32 i; | 1441 | u32 i; |
1021 | 1442 | ||
1022 | bfa_trc(ioc, msgp[0]); | 1443 | bfa_trc(ioc, msgp[0]); |
1023 | bfa_trc(ioc, len); | 1444 | bfa_trc(ioc, len); |
@@ -1038,17 +1459,20 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) | |||
1038 | * write 1 to mailbox CMD to trigger LPU event | 1459 | * write 1 to mailbox CMD to trigger LPU event |
1039 | */ | 1460 | */ |
1040 | bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); | 1461 | bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); |
1041 | (void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); | 1462 | (void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); |
1042 | } | 1463 | } |
1043 | 1464 | ||
1044 | static void | 1465 | static void |
1045 | bfa_ioc_send_enable(struct bfa_ioc_s *ioc) | 1466 | bfa_ioc_send_enable(struct bfa_ioc_s *ioc) |
1046 | { | 1467 | { |
1047 | struct bfi_ioc_ctrl_req_s enable_req; | 1468 | struct bfi_ioc_ctrl_req_s enable_req; |
1469 | struct bfa_timeval_s tv; | ||
1048 | 1470 | ||
1049 | bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, | 1471 | bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, |
1050 | bfa_ioc_portid(ioc)); | 1472 | bfa_ioc_portid(ioc)); |
1051 | enable_req.ioc_class = ioc->ioc_mc; | 1473 | enable_req.ioc_class = ioc->ioc_mc; |
1474 | bfa_os_gettimeofday(&tv); | ||
1475 | enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec); | ||
1052 | bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); | 1476 | bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); |
1053 | } | 1477 | } |
1054 | 1478 | ||
@@ -1065,7 +1489,7 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc) | |||
1065 | static void | 1489 | static void |
1066 | bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) | 1490 | bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) |
1067 | { | 1491 | { |
1068 | struct bfi_ioc_getattr_req_s attr_req; | 1492 | struct bfi_ioc_getattr_req_s attr_req; |
1069 | 1493 | ||
1070 | bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, | 1494 | bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, |
1071 | bfa_ioc_portid(ioc)); | 1495 | bfa_ioc_portid(ioc)); |
@@ -1077,12 +1501,11 @@ static void | |||
1077 | bfa_ioc_hb_check(void *cbarg) | 1501 | bfa_ioc_hb_check(void *cbarg) |
1078 | { | 1502 | { |
1079 | struct bfa_ioc_s *ioc = cbarg; | 1503 | struct bfa_ioc_s *ioc = cbarg; |
1080 | u32 hb_count; | 1504 | u32 hb_count; |
1081 | 1505 | ||
1082 | hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); | 1506 | hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); |
1083 | if (ioc->hb_count == hb_count) { | 1507 | if (ioc->hb_count == hb_count) { |
1084 | bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, | 1508 | printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count); |
1085 | hb_count); | ||
1086 | bfa_ioc_recover(ioc); | 1509 | bfa_ioc_recover(ioc); |
1087 | return; | 1510 | return; |
1088 | } else { | 1511 | } else { |
@@ -1090,61 +1513,54 @@ bfa_ioc_hb_check(void *cbarg) | |||
1090 | } | 1513 | } |
1091 | 1514 | ||
1092 | bfa_ioc_mbox_poll(ioc); | 1515 | bfa_ioc_mbox_poll(ioc); |
1093 | bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, | 1516 | bfa_hb_timer_start(ioc); |
1094 | ioc, BFA_IOC_HB_TOV); | ||
1095 | } | 1517 | } |
1096 | 1518 | ||
1097 | static void | 1519 | static void |
1098 | bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) | 1520 | bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) |
1099 | { | 1521 | { |
1100 | ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); | 1522 | ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); |
1101 | bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, | 1523 | bfa_hb_timer_start(ioc); |
1102 | BFA_IOC_HB_TOV); | ||
1103 | } | 1524 | } |
1104 | 1525 | ||
1105 | static void | 1526 | static void |
1106 | bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) | 1527 | bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) |
1107 | { | 1528 | { |
1108 | bfa_timer_stop(&ioc->ioc_timer); | 1529 | bfa_hb_timer_stop(ioc); |
1109 | } | 1530 | } |
1110 | 1531 | ||
1532 | |||
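The heartbeat path above reduces to a simple watchdog: sample a counter that the firmware increments, and if two consecutive samples taken one BFA_IOC_HB_TOV apart are identical, treat the firmware as hung and recover. A minimal standalone sketch of that pattern follows; the struct and function names are illustrative only, not part of this patch.

/* Illustrative watchdog sketch; names are hypothetical, not driver API. */
#include <stdbool.h>
#include <stdint.h>

struct hb_monitor {
	uint32_t last_count;                  /* heartbeat value from last tick */
	uint32_t (*read_count)(void *dev);    /* reads the device HB counter    */
	void (*recover)(void *dev);           /* called when the counter stalls */
};

/* Called once per heartbeat interval; returns false once recovery runs. */
static bool hb_monitor_tick(struct hb_monitor *m, void *dev)
{
	uint32_t now = m->read_count(dev);

	if (now == m->last_count) {           /* counter did not advance: hung */
		m->recover(dev);
		return false;                 /* caller stops re-arming the timer */
	}
	m->last_count = now;
	return true;                          /* healthy: caller re-arms the timer */
}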
1111 | /** | 1533 | /** |
1112 | * Initiate a full firmware download. | 1534 | * Initiate a full firmware download. |
1113 | */ | 1535 | */ |
1114 | static void | 1536 | static void |
1115 | bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, | 1537 | bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, |
1116 | u32 boot_param) | 1538 | u32 boot_env) |
1117 | { | 1539 | { |
1118 | u32 *fwimg; | 1540 | u32 *fwimg; |
1119 | u32 pgnum, pgoff; | 1541 | u32 pgnum, pgoff; |
1120 | u32 loff = 0; | 1542 | u32 loff = 0; |
1121 | u32 chunkno = 0; | 1543 | u32 chunkno = 0; |
1122 | u32 i; | 1544 | u32 i; |
1123 | 1545 | ||
1124 | /** | 1546 | /** |
1125 | * Initialize LMEM first before code download | 1547 | * Initialize LMEM first before code download |
1126 | */ | 1548 | */ |
1127 | bfa_ioc_lmem_init(ioc); | 1549 | bfa_ioc_lmem_init(ioc); |
1128 | 1550 | ||
1129 | /** | 1551 | bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc))); |
1130 | * Flash based firmware boot | 1552 | fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); |
1131 | */ | ||
1132 | bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc))); | ||
1133 | if (bfa_ioc_is_optrom(ioc)) | ||
1134 | boot_type = BFI_BOOT_TYPE_FLASH; | ||
1135 | fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); | ||
1136 | |||
1137 | 1553 | ||
1138 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | 1554 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); |
1139 | pgoff = bfa_ioc_smem_pgoff(ioc, loff); | 1555 | pgoff = bfa_ioc_smem_pgoff(ioc, loff); |
1140 | 1556 | ||
1141 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); | 1557 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); |
1142 | 1558 | ||
1143 | for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { | 1559 | for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { |
1144 | 1560 | ||
1145 | if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { | 1561 | if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { |
1146 | chunkno = BFA_IOC_FLASH_CHUNK_NO(i); | 1562 | chunkno = BFA_IOC_FLASH_CHUNK_NO(i); |
1147 | fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), | 1563 | fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), |
1148 | BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); | 1564 | BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); |
1149 | } | 1565 | } |
1150 | 1566 | ||
@@ -1162,7 +1578,8 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, | |||
1162 | loff = PSS_SMEM_PGOFF(loff); | 1578 | loff = PSS_SMEM_PGOFF(loff); |
1163 | if (loff == 0) { | 1579 | if (loff == 0) { |
1164 | pgnum++; | 1580 | pgnum++; |
1165 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); | 1581 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, |
1582 | pgnum); | ||
1166 | } | 1583 | } |
1167 | } | 1584 | } |
1168 | 1585 | ||
@@ -1171,11 +1588,11 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, | |||
1171 | 1588 | ||
1172 | /* | 1589 | /* |
1173 | * Set boot type and boot param at the end. | 1590 | * Set boot type and boot param at the end. |
1174 | */ | 1591 | */ |
1175 | bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, | 1592 | bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, |
1176 | bfa_os_swap32(boot_type)); | 1593 | bfa_os_swap32(boot_type)); |
1177 | bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF, | 1594 | bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF, |
1178 | bfa_os_swap32(boot_param)); | 1595 | bfa_os_swap32(boot_env)); |
1179 | } | 1596 | } |
1180 | 1597 | ||
1181 | static void | 1598 | static void |
@@ -1190,11 +1607,11 @@ bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force) | |||
1190 | static void | 1607 | static void |
1191 | bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) | 1608 | bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) |
1192 | { | 1609 | { |
1193 | struct bfi_ioc_attr_s *attr = ioc->attr; | 1610 | struct bfi_ioc_attr_s *attr = ioc->attr; |
1194 | 1611 | ||
1195 | attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); | 1612 | attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); |
1196 | attr->card_type = bfa_os_ntohl(attr->card_type); | 1613 | attr->card_type = bfa_os_ntohl(attr->card_type); |
1197 | attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); | 1614 | attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); |
1198 | 1615 | ||
1199 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); | 1616 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); |
1200 | } | 1617 | } |
@@ -1205,8 +1622,8 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) | |||
1205 | static void | 1622 | static void |
1206 | bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) | 1623 | bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) |
1207 | { | 1624 | { |
1208 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; | 1625 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; |
1209 | int mc; | 1626 | int mc; |
1210 | 1627 | ||
1211 | INIT_LIST_HEAD(&mod->cmd_q); | 1628 | INIT_LIST_HEAD(&mod->cmd_q); |
1212 | for (mc = 0; mc < BFI_MC_MAX; mc++) { | 1629 | for (mc = 0; mc < BFI_MC_MAX; mc++) { |
@@ -1221,9 +1638,9 @@ bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) | |||
1221 | static void | 1638 | static void |
1222 | bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) | 1639 | bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) |
1223 | { | 1640 | { |
1224 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; | 1641 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; |
1225 | struct bfa_mbox_cmd_s *cmd; | 1642 | struct bfa_mbox_cmd_s *cmd; |
1226 | u32 stat; | 1643 | u32 stat; |
1227 | 1644 | ||
1228 | /** | 1645 | /** |
1229 | * If no command pending, do nothing | 1646 | * If no command pending, do nothing |
@@ -1251,25 +1668,194 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) | |||
1251 | static void | 1668 | static void |
1252 | bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) | 1669 | bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) |
1253 | { | 1670 | { |
1254 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; | 1671 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; |
1255 | struct bfa_mbox_cmd_s *cmd; | 1672 | struct bfa_mbox_cmd_s *cmd; |
1256 | 1673 | ||
1257 | while (!list_empty(&mod->cmd_q)) | 1674 | while (!list_empty(&mod->cmd_q)) |
1258 | bfa_q_deq(&mod->cmd_q, &cmd); | 1675 | bfa_q_deq(&mod->cmd_q, &cmd); |
1259 | } | 1676 | } |
1260 | 1677 | ||
1261 | /** | 1678 | /** |
1262 | * bfa_ioc_public | 1679 | * Read data from SMEM to host through PCI memmap |
1680 | * | ||
1681 | * @param[in] ioc memory for IOC | ||
1682 | * @param[in] tbuf app memory to store data from smem | ||
1683 | * @param[in] soff smem offset | ||
1684 | * @param[in] sz size of smem in bytes | ||
1685 | */ | ||
1686 | static bfa_status_t | ||
1687 | bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) | ||
1688 | { | ||
1689 | u32 pgnum, loff, r32; | ||
1690 | int i, len; | ||
1691 | u32 *buf = tbuf; | ||
1692 | |||
1693 | pgnum = bfa_ioc_smem_pgnum(ioc, soff); | ||
1694 | loff = bfa_ioc_smem_pgoff(ioc, soff); | ||
1695 | bfa_trc(ioc, pgnum); | ||
1696 | bfa_trc(ioc, loff); | ||
1697 | bfa_trc(ioc, sz); | ||
1698 | |||
1699 | /* | ||
1700 | * Hold semaphore to serialize pll init and fwtrc. | ||
1701 | */ | ||
1702 | if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { | ||
1703 | bfa_trc(ioc, 0); | ||
1704 | return BFA_STATUS_FAILED; | ||
1705 | } | ||
1706 | |||
1707 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); | ||
1708 | |||
1709 | len = sz/sizeof(u32); | ||
1710 | bfa_trc(ioc, len); | ||
1711 | for (i = 0; i < len; i++) { | ||
1712 | r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); | ||
1713 | buf[i] = bfa_os_ntohl(r32); | ||
1714 | loff += sizeof(u32); | ||
1715 | |||
1716 | /** | ||
1717 | * handle page offset wrap around | ||
1718 | */ | ||
1719 | loff = PSS_SMEM_PGOFF(loff); | ||
1720 | if (loff == 0) { | ||
1721 | pgnum++; | ||
1722 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); | ||
1723 | } | ||
1724 | } | ||
1725 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, | ||
1726 | bfa_ioc_smem_pgnum(ioc, 0)); | ||
1727 | /* | ||
1728 | * release semaphore. | ||
1729 | */ | ||
1730 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); | ||
1731 | |||
1732 | bfa_trc(ioc, pgnum); | ||
1733 | return BFA_STATUS_OK; | ||
1734 | } | ||
1735 | |||
1736 | /** | ||
1737 | * Clear SMEM data from host through PCI memmap | ||
1738 | * | ||
1739 | * @param[in] ioc memory for IOC | ||
1740 | * @param[in] soff smem offset | ||
1741 | * @param[in] sz size of smem in bytes | ||
1742 | */ | ||
1743 | static bfa_status_t | ||
1744 | bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) | ||
1745 | { | ||
1746 | int i, len; | ||
1747 | u32 pgnum, loff; | ||
1748 | |||
1749 | pgnum = bfa_ioc_smem_pgnum(ioc, soff); | ||
1750 | loff = bfa_ioc_smem_pgoff(ioc, soff); | ||
1751 | bfa_trc(ioc, pgnum); | ||
1752 | bfa_trc(ioc, loff); | ||
1753 | bfa_trc(ioc, sz); | ||
1754 | |||
1755 | /* | ||
1756 | * Hold semaphore to serialize pll init and fwtrc. | ||
1757 | */ | ||
1758 | if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { | ||
1759 | bfa_trc(ioc, 0); | ||
1760 | return BFA_STATUS_FAILED; | ||
1761 | } | ||
1762 | |||
1763 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); | ||
1764 | |||
1765 | len = sz/sizeof(u32); /* len in words */ | ||
1766 | bfa_trc(ioc, len); | ||
1767 | for (i = 0; i < len; i++) { | ||
1768 | bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); | ||
1769 | loff += sizeof(u32); | ||
1770 | |||
1771 | /** | ||
1772 | * handle page offset wrap around | ||
1773 | */ | ||
1774 | loff = PSS_SMEM_PGOFF(loff); | ||
1775 | if (loff == 0) { | ||
1776 | pgnum++; | ||
1777 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); | ||
1778 | } | ||
1779 | } | ||
1780 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, | ||
1781 | bfa_ioc_smem_pgnum(ioc, 0)); | ||
1782 | |||
1783 | /* | ||
1784 | * release semaphore. | ||
1785 | */ | ||
1786 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); | ||
1787 | bfa_trc(ioc, pgnum); | ||
1788 | return BFA_STATUS_OK; | ||
1789 | } | ||
1790 | |||
1791 | /** | ||
1792 | * hal iocpf to ioc interface | ||
1793 | */ | ||
1794 | static void | ||
1795 | bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc) | ||
1796 | { | ||
1797 | bfa_fsm_send_event(ioc, IOC_E_ENABLED); | ||
1798 | } | ||
1799 | |||
1800 | static void | ||
1801 | bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc) | ||
1802 | { | ||
1803 | bfa_fsm_send_event(ioc, IOC_E_DISABLED); | ||
1804 | } | ||
1805 | |||
1806 | static void | ||
1807 | bfa_ioc_pf_failed(struct bfa_ioc_s *ioc) | ||
1808 | { | ||
1809 | bfa_fsm_send_event(ioc, IOC_E_FAILED); | ||
1810 | } | ||
1811 | |||
1812 | static void | ||
1813 | bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) | ||
1814 | { | ||
1815 | struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; | ||
1816 | /** | ||
1817 | * Provide enable completion callback. | ||
1818 | */ | ||
1819 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | ||
1820 | BFA_LOG(KERN_WARNING, bfad, log_level, | ||
1821 | "Running firmware version is incompatible " | ||
1822 | "with the driver version\n"); | ||
1823 | } | ||
1824 | |||
1825 | |||
1826 | |||
1827 | /** | ||
1828 | * hal_ioc_public | ||
1263 | */ | 1829 | */ |
1264 | 1830 | ||
1831 | bfa_status_t | ||
1832 | bfa_ioc_pll_init(struct bfa_ioc_s *ioc) | ||
1833 | { | ||
1834 | |||
1835 | /* | ||
1836 | * Hold semaphore so that nobody can access the chip during init. | ||
1837 | */ | ||
1838 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); | ||
1839 | |||
1840 | bfa_ioc_pll_init_asic(ioc); | ||
1841 | |||
1842 | ioc->pllinit = BFA_TRUE; | ||
1843 | /* | ||
1844 | * release semaphore. | ||
1845 | */ | ||
1846 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); | ||
1847 | |||
1848 | return BFA_STATUS_OK; | ||
1849 | } | ||
1850 | |||
1265 | /** | 1851 | /** |
1266 | * Interface used by diag module to do firmware boot with memory test | 1852 | * Interface used by diag module to do firmware boot with memory test |
1267 | * as the entry vector. | 1853 | * as the entry vector. |
1268 | */ | 1854 | */ |
1269 | void | 1855 | void |
1270 | bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) | 1856 | bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) |
1271 | { | 1857 | { |
1272 | bfa_os_addr_t rb; | 1858 | bfa_os_addr_t rb; |
1273 | 1859 | ||
1274 | bfa_ioc_stats(ioc, ioc_boots); | 1860 | bfa_ioc_stats(ioc, ioc_boots); |
1275 | 1861 | ||
@@ -1280,7 +1866,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) | |||
1280 | * Initialize IOC state of all functions on a chip reset. | 1866 | * Initialize IOC state of all functions on a chip reset. |
1281 | */ | 1867 | */ |
1282 | rb = ioc->pcidev.pci_bar_kva; | 1868 | rb = ioc->pcidev.pci_bar_kva; |
1283 | if (boot_param == BFI_BOOT_TYPE_MEMTEST) { | 1869 | if (boot_type == BFI_BOOT_TYPE_MEMTEST) { |
1284 | bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST); | 1870 | bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST); |
1285 | bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST); | 1871 | bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST); |
1286 | } else { | 1872 | } else { |
@@ -1289,7 +1875,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) | |||
1289 | } | 1875 | } |
1290 | 1876 | ||
1291 | bfa_ioc_msgflush(ioc); | 1877 | bfa_ioc_msgflush(ioc); |
1292 | bfa_ioc_download_fw(ioc, boot_type, boot_param); | 1878 | bfa_ioc_download_fw(ioc, boot_type, boot_env); |
1293 | 1879 | ||
1294 | /** | 1880 | /** |
1295 | * Enable interrupts just before starting LPU | 1881 | * Enable interrupts just before starting LPU |
@@ -1308,18 +1894,29 @@ bfa_ioc_auto_recover(bfa_boolean_t auto_recover) | |||
1308 | } | 1894 | } |
1309 | 1895 | ||
1310 | 1896 | ||
1897 | |||
1311 | bfa_boolean_t | 1898 | bfa_boolean_t |
1312 | bfa_ioc_is_operational(struct bfa_ioc_s *ioc) | 1899 | bfa_ioc_is_operational(struct bfa_ioc_s *ioc) |
1313 | { | 1900 | { |
1314 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); | 1901 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); |
1315 | } | 1902 | } |
1316 | 1903 | ||
1904 | bfa_boolean_t | ||
1905 | bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) | ||
1906 | { | ||
1907 | u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); | ||
1908 | |||
1909 | return ((r32 != BFI_IOC_UNINIT) && | ||
1910 | (r32 != BFI_IOC_INITING) && | ||
1911 | (r32 != BFI_IOC_MEMTEST)); | ||
1912 | } | ||
1913 | |||
1317 | void | 1914 | void |
1318 | bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) | 1915 | bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) |
1319 | { | 1916 | { |
1320 | u32 *msgp = mbmsg; | 1917 | u32 *msgp = mbmsg; |
1321 | u32 r32; | 1918 | u32 r32; |
1322 | int i; | 1919 | int i; |
1323 | 1920 | ||
1324 | /** | 1921 | /** |
1325 | * read the MBOX msg | 1922 | * read the MBOX msg |
@@ -1341,9 +1938,10 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) | |||
1341 | void | 1938 | void |
1342 | bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) | 1939 | bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) |
1343 | { | 1940 | { |
1344 | union bfi_ioc_i2h_msg_u *msg; | 1941 | union bfi_ioc_i2h_msg_u *msg; |
1942 | struct bfa_iocpf_s *iocpf = &ioc->iocpf; | ||
1345 | 1943 | ||
1346 | msg = (union bfi_ioc_i2h_msg_u *)m; | 1944 | msg = (union bfi_ioc_i2h_msg_u *) m; |
1347 | 1945 | ||
1348 | bfa_ioc_stats(ioc, ioc_isrs); | 1946 | bfa_ioc_stats(ioc, ioc_isrs); |
1349 | 1947 | ||
@@ -1352,15 +1950,15 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) | |||
1352 | break; | 1950 | break; |
1353 | 1951 | ||
1354 | case BFI_IOC_I2H_READY_EVENT: | 1952 | case BFI_IOC_I2H_READY_EVENT: |
1355 | bfa_fsm_send_event(ioc, IOC_E_FWREADY); | 1953 | bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY); |
1356 | break; | 1954 | break; |
1357 | 1955 | ||
1358 | case BFI_IOC_I2H_ENABLE_REPLY: | 1956 | case BFI_IOC_I2H_ENABLE_REPLY: |
1359 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); | 1957 | bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); |
1360 | break; | 1958 | break; |
1361 | 1959 | ||
1362 | case BFI_IOC_I2H_DISABLE_REPLY: | 1960 | case BFI_IOC_I2H_DISABLE_REPLY: |
1363 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); | 1961 | bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); |
1364 | break; | 1962 | break; |
1365 | 1963 | ||
1366 | case BFI_IOC_I2H_GETATTR_REPLY: | 1964 | case BFI_IOC_I2H_GETATTR_REPLY: |
@@ -1378,29 +1976,24 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) | |||
1378 | * | 1976 | * |
1379 | * @param[in] ioc memory for IOC | 1977 | * @param[in] ioc memory for IOC |
1380 | * @param[in] bfa driver instance structure | 1978 | * @param[in] bfa driver instance structure |
1381 | * @param[in] trcmod kernel trace module | ||
1382 | * @param[in] aen kernel aen event module | ||
1383 | * @param[in] logm kernel logging module | ||
1384 | */ | 1979 | */ |
1385 | void | 1980 | void |
1386 | bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, | 1981 | bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, |
1387 | struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod, | 1982 | struct bfa_timer_mod_s *timer_mod) |
1388 | struct bfa_aen_s *aen, struct bfa_log_mod_s *logm) | 1983 | { |
1389 | { | 1984 | ioc->bfa = bfa; |
1390 | ioc->bfa = bfa; | 1985 | ioc->cbfn = cbfn; |
1391 | ioc->cbfn = cbfn; | 1986 | ioc->timer_mod = timer_mod; |
1392 | ioc->timer_mod = timer_mod; | 1987 | ioc->fcmode = BFA_FALSE; |
1393 | ioc->trcmod = trcmod; | 1988 | ioc->pllinit = BFA_FALSE; |
1394 | ioc->aen = aen; | ||
1395 | ioc->logm = logm; | ||
1396 | ioc->fcmode = BFA_FALSE; | ||
1397 | ioc->pllinit = BFA_FALSE; | ||
1398 | ioc->dbg_fwsave_once = BFA_TRUE; | 1989 | ioc->dbg_fwsave_once = BFA_TRUE; |
1990 | ioc->iocpf.ioc = ioc; | ||
1399 | 1991 | ||
1400 | bfa_ioc_mbox_attach(ioc); | 1992 | bfa_ioc_mbox_attach(ioc); |
1401 | INIT_LIST_HEAD(&ioc->hb_notify_q); | 1993 | INIT_LIST_HEAD(&ioc->hb_notify_q); |
1402 | 1994 | ||
1403 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 1995 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); |
1996 | bfa_fsm_send_event(ioc, IOC_E_RESET); | ||
1404 | } | 1997 | } |
1405 | 1998 | ||
1406 | /** | 1999 | /** |
@@ -1421,10 +2014,10 @@ void | |||
1421 | bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, | 2014 | bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, |
1422 | enum bfi_mclass mc) | 2015 | enum bfi_mclass mc) |
1423 | { | 2016 | { |
1424 | ioc->ioc_mc = mc; | 2017 | ioc->ioc_mc = mc; |
1425 | ioc->pcidev = *pcidev; | 2018 | ioc->pcidev = *pcidev; |
1426 | ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); | 2019 | ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); |
1427 | ioc->cna = ioc->ctdev && !ioc->fcmode; | 2020 | ioc->cna = ioc->ctdev && !ioc->fcmode; |
1428 | 2021 | ||
1429 | /** | 2022 | /** |
1430 | * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c | 2023 | * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c |
@@ -1445,14 +2038,14 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, | |||
1445 | * @param[in] dm_pa physical address of IOC dma memory | 2038 | * @param[in] dm_pa physical address of IOC dma memory |
1446 | */ | 2039 | */ |
1447 | void | 2040 | void |
1448 | bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) | 2041 | bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) |
1449 | { | 2042 | { |
1450 | /** | 2043 | /** |
1451 | * dma memory for firmware attribute | 2044 | * dma memory for firmware attribute |
1452 | */ | 2045 | */ |
1453 | ioc->attr_dma.kva = dm_kva; | 2046 | ioc->attr_dma.kva = dm_kva; |
1454 | ioc->attr_dma.pa = dm_pa; | 2047 | ioc->attr_dma.pa = dm_pa; |
1455 | ioc->attr = (struct bfi_ioc_attr_s *)dm_kva; | 2048 | ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; |
1456 | } | 2049 | } |
1457 | 2050 | ||
1458 | /** | 2051 | /** |
@@ -1490,7 +2083,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc) | |||
1490 | int | 2083 | int |
1491 | bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover) | 2084 | bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover) |
1492 | { | 2085 | { |
1493 | return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; | 2086 | return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; |
1494 | } | 2087 | } |
1495 | 2088 | ||
1496 | /** | 2089 | /** |
@@ -1500,8 +2093,8 @@ return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; | |||
1500 | void | 2093 | void |
1501 | bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) | 2094 | bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) |
1502 | { | 2095 | { |
1503 | ioc->dbg_fwsave = dbg_fwsave; | 2096 | ioc->dbg_fwsave = dbg_fwsave; |
1504 | ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); | 2097 | ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover); |
1505 | } | 2098 | } |
1506 | 2099 | ||
1507 | u32 | 2100 | u32 |
@@ -1525,8 +2118,8 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr) | |||
1525 | void | 2118 | void |
1526 | bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) | 2119 | bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) |
1527 | { | 2120 | { |
1528 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; | 2121 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; |
1529 | int mc; | 2122 | int mc; |
1530 | 2123 | ||
1531 | for (mc = 0; mc < BFI_MC_MAX; mc++) | 2124 | for (mc = 0; mc < BFI_MC_MAX; mc++) |
1532 | mod->mbhdlr[mc].cbfn = mcfuncs[mc]; | 2125 | mod->mbhdlr[mc].cbfn = mcfuncs[mc]; |
@@ -1539,10 +2132,10 @@ void | |||
1539 | bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, | 2132 | bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, |
1540 | bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) | 2133 | bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) |
1541 | { | 2134 | { |
1542 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; | 2135 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; |
1543 | 2136 | ||
1544 | mod->mbhdlr[mc].cbfn = cbfn; | 2137 | mod->mbhdlr[mc].cbfn = cbfn; |
1545 | mod->mbhdlr[mc].cbarg = cbarg; | 2138 | mod->mbhdlr[mc].cbarg = cbarg; |
1546 | } | 2139 | } |
1547 | 2140 | ||
1548 | /** | 2141 | /** |
@@ -1555,8 +2148,8 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, | |||
1555 | void | 2148 | void |
1556 | bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) | 2149 | bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) |
1557 | { | 2150 | { |
1558 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; | 2151 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; |
1559 | u32 stat; | 2152 | u32 stat; |
1560 | 2153 | ||
1561 | /** | 2154 | /** |
1562 | * If a previous command is pending, queue new command | 2155 | * If a previous command is pending, queue new command |
@@ -1587,9 +2180,9 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) | |||
1587 | void | 2180 | void |
1588 | bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) | 2181 | bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) |
1589 | { | 2182 | { |
1590 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; | 2183 | struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; |
1591 | struct bfi_mbmsg_s m; | 2184 | struct bfi_mbmsg_s m; |
1592 | int mc; | 2185 | int mc; |
1593 | 2186 | ||
1594 | bfa_ioc_msgget(ioc, &m); | 2187 | bfa_ioc_msgget(ioc, &m); |
1595 | 2188 | ||
@@ -1621,16 +2214,14 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc) | |||
1621 | ioc->port_id = bfa_ioc_pcifn(ioc); | 2214 | ioc->port_id = bfa_ioc_pcifn(ioc); |
1622 | } | 2215 | } |
1623 | 2216 | ||
1624 | #ifndef BFA_BIOS_BUILD | ||
1625 | |||
1626 | /** | 2217 | /** |
1627 | * return true if IOC is disabled | 2218 | * return true if IOC is disabled |
1628 | */ | 2219 | */ |
1629 | bfa_boolean_t | 2220 | bfa_boolean_t |
1630 | bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) | 2221 | bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) |
1631 | { | 2222 | { |
1632 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) | 2223 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || |
1633 | || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); | 2224 | bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); |
1634 | } | 2225 | } |
1635 | 2226 | ||
1636 | /** | 2227 | /** |
@@ -1639,9 +2230,9 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) | |||
1639 | bfa_boolean_t | 2230 | bfa_boolean_t |
1640 | bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) | 2231 | bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) |
1641 | { | 2232 | { |
1642 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) | 2233 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) || |
1643 | || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) | 2234 | bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) || |
1644 | || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); | 2235 | bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch); |
1645 | } | 2236 | } |
1646 | 2237 | ||
1647 | #define bfa_ioc_state_disabled(__sm) \ | 2238 | #define bfa_ioc_state_disabled(__sm) \ |
@@ -1659,8 +2250,8 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) | |||
1659 | bfa_boolean_t | 2250 | bfa_boolean_t |
1660 | bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) | 2251 | bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) |
1661 | { | 2252 | { |
1662 | u32 ioc_state; | 2253 | u32 ioc_state; |
1663 | bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; | 2254 | bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; |
1664 | 2255 | ||
1665 | if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) | 2256 | if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) |
1666 | return BFA_FALSE; | 2257 | return BFA_FALSE; |
@@ -1669,16 +2260,18 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) | |||
1669 | if (!bfa_ioc_state_disabled(ioc_state)) | 2260 | if (!bfa_ioc_state_disabled(ioc_state)) |
1670 | return BFA_FALSE; | 2261 | return BFA_FALSE; |
1671 | 2262 | ||
1672 | ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); | 2263 | if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { |
1673 | if (!bfa_ioc_state_disabled(ioc_state)) | 2264 | ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); |
1674 | return BFA_FALSE; | 2265 | if (!bfa_ioc_state_disabled(ioc_state)) |
2266 | return BFA_FALSE; | ||
2267 | } | ||
1675 | 2268 | ||
1676 | return BFA_TRUE; | 2269 | return BFA_TRUE; |
1677 | } | 2270 | } |
1678 | 2271 | ||
1679 | /** | 2272 | /** |
1680 | * Add to IOC heartbeat failure notification queue. To be used by common | 2273 | * Add to IOC heartbeat failure notification queue. To be used by common |
1681 | * modules such as | 2274 | * modules such as cee, port, diag. |
1682 | */ | 2275 | */ |
1683 | void | 2276 | void |
1684 | bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, | 2277 | bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, |
@@ -1692,7 +2285,7 @@ void | |||
1692 | bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, | 2285 | bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, |
1693 | struct bfa_adapter_attr_s *ad_attr) | 2286 | struct bfa_adapter_attr_s *ad_attr) |
1694 | { | 2287 | { |
1695 | struct bfi_ioc_attr_s *ioc_attr; | 2288 | struct bfi_ioc_attr_s *ioc_attr; |
1696 | 2289 | ||
1697 | ioc_attr = ioc->attr; | 2290 | ioc_attr = ioc->attr; |
1698 | 2291 | ||
@@ -1719,7 +2312,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, | |||
1719 | ad_attr->prototype = 0; | 2312 | ad_attr->prototype = 0; |
1720 | 2313 | ||
1721 | ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); | 2314 | ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); |
1722 | ad_attr->mac = bfa_ioc_get_mac(ioc); | 2315 | ad_attr->mac = bfa_ioc_get_mac(ioc); |
1723 | 2316 | ||
1724 | ad_attr->pcie_gen = ioc_attr->pcie_gen; | 2317 | ad_attr->pcie_gen = ioc_attr->pcie_gen; |
1725 | ad_attr->pcie_lanes = ioc_attr->pcie_lanes; | 2318 | ad_attr->pcie_lanes = ioc_attr->pcie_lanes; |
@@ -1729,6 +2322,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, | |||
1729 | bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); | 2322 | bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); |
1730 | 2323 | ||
1731 | ad_attr->cna_capable = ioc->cna; | 2324 | ad_attr->cna_capable = ioc->cna; |
2325 | ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna; | ||
1732 | } | 2326 | } |
1733 | 2327 | ||
1734 | enum bfa_ioc_type_e | 2328 | enum bfa_ioc_type_e |
@@ -1782,7 +2376,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) | |||
1782 | { | 2376 | { |
1783 | bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); | 2377 | bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); |
1784 | bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, | 2378 | bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, |
1785 | BFA_VERSION_LEN); | 2379 | BFA_VERSION_LEN); |
1786 | } | 2380 | } |
1787 | 2381 | ||
1788 | void | 2382 | void |
@@ -1795,7 +2389,7 @@ bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) | |||
1795 | void | 2389 | void |
1796 | bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) | 2390 | bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) |
1797 | { | 2391 | { |
1798 | struct bfi_ioc_attr_s *ioc_attr; | 2392 | struct bfi_ioc_attr_s *ioc_attr; |
1799 | 2393 | ||
1800 | bfa_assert(model); | 2394 | bfa_assert(model); |
1801 | bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); | 2395 | bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); |
@@ -1805,14 +2399,48 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) | |||
1805 | /** | 2399 | /** |
1806 | * model name | 2400 | * model name |
1807 | */ | 2401 | */ |
1808 | snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", | 2402 | bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", |
1809 | BFA_MFG_NAME, ioc_attr->card_type); | 2403 | BFA_MFG_NAME, ioc_attr->card_type); |
1810 | } | 2404 | } |
1811 | 2405 | ||
1812 | enum bfa_ioc_state | 2406 | enum bfa_ioc_state |
1813 | bfa_ioc_get_state(struct bfa_ioc_s *ioc) | 2407 | bfa_ioc_get_state(struct bfa_ioc_s *ioc) |
1814 | { | 2408 | { |
1815 | return bfa_sm_to_state(ioc_sm_table, ioc->fsm); | 2409 | enum bfa_iocpf_state iocpf_st; |
2410 | enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); | ||
2411 | |||
2412 | if (ioc_st == BFA_IOC_ENABLING || | ||
2413 | ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { | ||
2414 | |||
2415 | iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); | ||
2416 | |||
2417 | switch (iocpf_st) { | ||
2418 | case BFA_IOCPF_SEMWAIT: | ||
2419 | ioc_st = BFA_IOC_SEMWAIT; | ||
2420 | break; | ||
2421 | |||
2422 | case BFA_IOCPF_HWINIT: | ||
2423 | ioc_st = BFA_IOC_HWINIT; | ||
2424 | break; | ||
2425 | |||
2426 | case BFA_IOCPF_FWMISMATCH: | ||
2427 | ioc_st = BFA_IOC_FWMISMATCH; | ||
2428 | break; | ||
2429 | |||
2430 | case BFA_IOCPF_FAIL: | ||
2431 | ioc_st = BFA_IOC_FAIL; | ||
2432 | break; | ||
2433 | |||
2434 | case BFA_IOCPF_INITFAIL: | ||
2435 | ioc_st = BFA_IOC_INITFAIL; | ||
2436 | break; | ||
2437 | |||
2438 | default: | ||
2439 | break; | ||
2440 | } | ||
2441 | } | ||
2442 | |||
2443 | return ioc_st; | ||
1816 | } | 2444 | } |
1817 | 2445 | ||
1818 | void | 2446 | void |
@@ -1833,7 +2461,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) | |||
1833 | } | 2461 | } |
1834 | 2462 | ||
1835 | /** | 2463 | /** |
1836 | * bfa_wwn_public | 2464 | * hal_wwn_public |
1837 | */ | 2465 | */ |
1838 | wwn_t | 2466 | wwn_t |
1839 | bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc) | 2467 | bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc) |
@@ -1857,10 +2485,10 @@ mac_t | |||
1857 | bfa_ioc_get_mac(struct bfa_ioc_s *ioc) | 2485 | bfa_ioc_get_mac(struct bfa_ioc_s *ioc) |
1858 | { | 2486 | { |
1859 | /* | 2487 | /* |
1860 | * Currently mfg mac is used as FCoE enode mac (not configured by PBC) | 2488 | * Check the IOC type and return the appropriate MAC |
1861 | */ | 2489 | */ |
1862 | if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) | 2490 | if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) |
1863 | return bfa_ioc_get_mfg_mac(ioc); | 2491 | return ioc->attr->fcoe_mac; |
1864 | else | 2492 | else |
1865 | return ioc->attr->mac; | 2493 | return ioc->attr->mac; |
1866 | } | 2494 | } |
@@ -1880,12 +2508,16 @@ bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc) | |||
1880 | mac_t | 2508 | mac_t |
1881 | bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) | 2509 | bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) |
1882 | { | 2510 | { |
1883 | mac_t mac; | 2511 | mac_t m; |
1884 | 2512 | ||
1885 | mac = ioc->attr->mfg_mac; | 2513 | m = ioc->attr->mfg_mac; |
1886 | mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); | 2514 | if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) |
2515 | m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); | ||
2516 | else | ||
2517 | bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]), | ||
2518 | bfa_ioc_pcifn(ioc)); | ||
1887 | 2519 | ||
1888 | return mac; | 2520 | return m; |
1889 | } | 2521 | } |
1890 | 2522 | ||
1891 | bfa_boolean_t | 2523 | bfa_boolean_t |
@@ -1895,46 +2527,12 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc) | |||
1895 | } | 2527 | } |
1896 | 2528 | ||
1897 | /** | 2529 | /** |
1898 | * Send AEN notification | ||
1899 | */ | ||
1900 | void | ||
1901 | bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) | ||
1902 | { | ||
1903 | union bfa_aen_data_u aen_data; | ||
1904 | struct bfa_log_mod_s *logmod = ioc->logm; | ||
1905 | s32 inst_num = 0; | ||
1906 | enum bfa_ioc_type_e ioc_type; | ||
1907 | |||
1908 | bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num); | ||
1909 | |||
1910 | memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn)); | ||
1911 | memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac)); | ||
1912 | ioc_type = bfa_ioc_get_type(ioc); | ||
1913 | switch (ioc_type) { | ||
1914 | case BFA_IOC_TYPE_FC: | ||
1915 | aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); | ||
1916 | break; | ||
1917 | case BFA_IOC_TYPE_FCoE: | ||
1918 | aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); | ||
1919 | aen_data.ioc.mac = bfa_ioc_get_mac(ioc); | ||
1920 | break; | ||
1921 | case BFA_IOC_TYPE_LL: | ||
1922 | aen_data.ioc.mac = bfa_ioc_get_mac(ioc); | ||
1923 | break; | ||
1924 | default: | ||
1925 | bfa_assert(ioc_type == BFA_IOC_TYPE_FC); | ||
1926 | break; | ||
1927 | } | ||
1928 | aen_data.ioc.ioc_type = ioc_type; | ||
1929 | } | ||
1930 | |||
1931 | /** | ||
1932 | * Retrieve saved firmware trace from a prior IOC failure. | 2530 | * Retrieve saved firmware trace from a prior IOC failure. |
1933 | */ | 2531 | */ |
1934 | bfa_status_t | 2532 | bfa_status_t |
1935 | bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) | 2533 | bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) |
1936 | { | 2534 | { |
1937 | int tlen; | 2535 | int tlen; |
1938 | 2536 | ||
1939 | if (ioc->dbg_fwsave_len == 0) | 2537 | if (ioc->dbg_fwsave_len == 0) |
1940 | return BFA_STATUS_ENOFSAVE; | 2538 | return BFA_STATUS_ENOFSAVE; |
@@ -1963,57 +2561,145 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc) | |||
1963 | bfa_status_t | 2561 | bfa_status_t |
1964 | bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) | 2562 | bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) |
1965 | { | 2563 | { |
1966 | u32 pgnum; | 2564 | u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)); |
1967 | u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)); | 2565 | int tlen; |
1968 | int i, tlen; | 2566 | bfa_status_t status; |
1969 | u32 *tbuf = trcdata, r32; | ||
1970 | 2567 | ||
1971 | bfa_trc(ioc, *trclen); | 2568 | bfa_trc(ioc, *trclen); |
1972 | 2569 | ||
1973 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | ||
1974 | loff = bfa_ioc_smem_pgoff(ioc, loff); | ||
1975 | |||
1976 | /* | ||
1977 | * Hold semaphore to serialize pll init and fwtrc. | ||
1978 | */ | ||
1979 | if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) | ||
1980 | return BFA_STATUS_FAILED; | ||
1981 | |||
1982 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); | ||
1983 | |||
1984 | tlen = *trclen; | 2570 | tlen = *trclen; |
1985 | if (tlen > BFA_DBG_FWTRC_LEN) | 2571 | if (tlen > BFA_DBG_FWTRC_LEN) |
1986 | tlen = BFA_DBG_FWTRC_LEN; | 2572 | tlen = BFA_DBG_FWTRC_LEN; |
1987 | tlen /= sizeof(u32); | ||
1988 | 2573 | ||
1989 | bfa_trc(ioc, tlen); | 2574 | status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen); |
2575 | *trclen = tlen; | ||
2576 | return status; | ||
2577 | } | ||
1990 | 2578 | ||
1991 | for (i = 0; i < tlen; i++) { | 2579 | static void |
1992 | r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); | 2580 | bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc) |
1993 | tbuf[i] = bfa_os_ntohl(r32); | 2581 | { |
1994 | loff += sizeof(u32); | 2582 | struct bfa_mbox_cmd_s cmd; |
2583 | struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg; | ||
1995 | 2584 | ||
1996 | /** | 2585 | bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC, |
1997 | * handle page offset wrap around | 2586 | bfa_ioc_portid(ioc)); |
1998 | */ | 2587 | req->ioc_class = ioc->ioc_mc; |
1999 | loff = PSS_SMEM_PGOFF(loff); | 2588 | bfa_ioc_mbox_queue(ioc, &cmd); |
2000 | if (loff == 0) { | 2589 | } |
2001 | pgnum++; | 2590 | |
2002 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); | 2591 | static void |
2003 | } | 2592 | bfa_ioc_fwsync(struct bfa_ioc_s *ioc) |
2593 | { | ||
2594 | u32 fwsync_iter = 1000; | ||
2595 | |||
2596 | bfa_ioc_send_fwsync(ioc); | ||
2597 | |||
2598 | /** | ||
2599 | * After sending a fw sync mbox command wait for it to | ||
2600 | * take effect. We will not wait for a response because | ||
2601 | * 1. fw_sync mbox cmd doesn't have a response. | ||
2602 | * 2. Even if we implement that, interrupts might not | ||
2603 | * be enabled when we call this function. | ||
2604 | * So, just keep checking if any mbox cmd is pending, and | ||
2605 | * after waiting for a reasonable amount of time, go ahead. | ||
2606 | * It is possible that fw has crashed and the mbox command | ||
2607 | * is never acknowledged. | ||
2608 | */ | ||
2609 | while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0) | ||
2610 | fwsync_iter--; | ||
2611 | } | ||
2612 | |||
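The comment block above explains the open-coded wait in bfa_ioc_fwsync: the sync command has no reply and interrupts may be masked, so the code simply polls the pending flag a bounded number of times and then proceeds, since a crashed firmware would otherwise leave it spinning forever. The same idea as a tiny generic helper, with a made-up name and signature:

/* Illustrative bounded poll; helper name and signature are hypothetical. */
#include <stdbool.h>

/* Spin while cond(arg) holds, up to max_iter checks; true if it cleared. */
static bool poll_until_clear(bool (*cond)(void *), void *arg, unsigned int max_iter)
{
	while (cond(arg) && max_iter > 0)
		max_iter--;
	return !cond(arg);
}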
2613 | /** | ||
2614 | * Dump firmware smem | ||
2615 | */ | ||
2616 | bfa_status_t | ||
2617 | bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, | ||
2618 | u32 *offset, int *buflen) | ||
2619 | { | ||
2620 | u32 loff; | ||
2621 | int dlen; | ||
2622 | bfa_status_t status; | ||
2623 | u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc); | ||
2624 | |||
2625 | if (*offset >= smem_len) { | ||
2626 | *offset = *buflen = 0; | ||
2627 | return BFA_STATUS_EINVAL; | ||
2004 | } | 2628 | } |
2005 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, | ||
2006 | bfa_ioc_smem_pgnum(ioc, 0)); | ||
2007 | 2629 | ||
2008 | /* | 2630 | loff = *offset; |
2009 | * release semaphore. | 2631 | dlen = *buflen; |
2632 | |||
2633 | /** | ||
2634 | * First smem read, sync smem before proceeding | ||
2635 | * No need to sync before reading every chunk. | ||
2010 | */ | 2636 | */ |
2011 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); | 2637 | if (loff == 0) |
2638 | bfa_ioc_fwsync(ioc); | ||
2012 | 2639 | ||
2013 | bfa_trc(ioc, pgnum); | 2640 | if ((loff + dlen) >= smem_len) |
2641 | dlen = smem_len - loff; | ||
2014 | 2642 | ||
2015 | *trclen = tlen * sizeof(u32); | 2643 | status = bfa_ioc_smem_read(ioc, buf, loff, dlen); |
2016 | return BFA_STATUS_OK; | 2644 | |
2645 | if (status != BFA_STATUS_OK) { | ||
2646 | *offset = *buflen = 0; | ||
2647 | return status; | ||
2648 | } | ||
2649 | |||
2650 | *offset += dlen; | ||
2651 | |||
2652 | if (*offset >= smem_len) | ||
2653 | *offset = 0; | ||
2654 | |||
2655 | *buflen = dlen; | ||
2656 | |||
2657 | return status; | ||
2658 | } | ||
2659 | |||
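bfa_ioc_debug_fwcore exposes the SMEM dump as a cursor API: the caller keeps passing *offset and *buflen, the function clamps the request to the end of the region, advances the offset, and wraps it back to zero once the whole region has been read. The clamp-and-wrap logic, isolated into a standalone sketch with invented names:

/* Illustrative dump cursor; struct and function names are hypothetical. */
#include <stdint.h>

struct dump_cursor {
	uint32_t off;        /* next offset to read      */
	uint32_t total;      /* total size of the region */
};

/* Clamp a request of len bytes, advance the cursor, wrap at the end.
 * Returns the number of bytes the caller should actually read now. */
static uint32_t dump_next_chunk(struct dump_cursor *c, uint32_t len)
{
	if (c->off >= c->total)
		return 0;                      /* nothing left; caller reports an error */
	if (c->off + len >= c->total)
		len = c->total - c->off;       /* clamp to the end of the region */
	c->off += len;
	if (c->off >= c->total)
		c->off = 0;                    /* wrap so the next pass starts over */
	return len;
}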
2660 | /** | ||
2661 | * Firmware statistics | ||
2662 | */ | ||
2663 | bfa_status_t | ||
2664 | bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats) | ||
2665 | { | ||
2666 | u32 loff = BFI_IOC_FWSTATS_OFF + \ | ||
2667 | BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc)); | ||
2668 | int tlen; | ||
2669 | bfa_status_t status; | ||
2670 | |||
2671 | if (ioc->stats_busy) { | ||
2672 | bfa_trc(ioc, ioc->stats_busy); | ||
2673 | return BFA_STATUS_DEVBUSY; | ||
2674 | } | ||
2675 | ioc->stats_busy = BFA_TRUE; | ||
2676 | |||
2677 | tlen = sizeof(struct bfa_fw_stats_s); | ||
2678 | status = bfa_ioc_smem_read(ioc, stats, loff, tlen); | ||
2679 | |||
2680 | ioc->stats_busy = BFA_FALSE; | ||
2681 | return status; | ||
2682 | } | ||
2683 | |||
2684 | bfa_status_t | ||
2685 | bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc) | ||
2686 | { | ||
2687 | u32 loff = BFI_IOC_FWSTATS_OFF + \ | ||
2688 | BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc)); | ||
2689 | int tlen; | ||
2690 | bfa_status_t status; | ||
2691 | |||
2692 | if (ioc->stats_busy) { | ||
2693 | bfa_trc(ioc, ioc->stats_busy); | ||
2694 | return BFA_STATUS_DEVBUSY; | ||
2695 | } | ||
2696 | ioc->stats_busy = BFA_TRUE; | ||
2697 | |||
2698 | tlen = sizeof(struct bfa_fw_stats_s); | ||
2699 | status = bfa_ioc_smem_clr(ioc, loff, tlen); | ||
2700 | |||
2701 | ioc->stats_busy = BFA_FALSE; | ||
2702 | return status; | ||
2017 | } | 2703 | } |
2018 | 2704 | ||
2019 | /** | 2705 | /** |
@@ -2022,7 +2708,7 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) | |||
2022 | static void | 2708 | static void |
2023 | bfa_ioc_debug_save(struct bfa_ioc_s *ioc) | 2709 | bfa_ioc_debug_save(struct bfa_ioc_s *ioc) |
2024 | { | 2710 | { |
2025 | int tlen; | 2711 | int tlen; |
2026 | 2712 | ||
2027 | if (ioc->dbg_fwsave_len) { | 2713 | if (ioc->dbg_fwsave_len) { |
2028 | tlen = ioc->dbg_fwsave_len; | 2714 | tlen = ioc->dbg_fwsave_len; |
@@ -2050,11 +2736,135 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc) | |||
2050 | { | 2736 | { |
2051 | if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) | 2737 | if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) |
2052 | return; | 2738 | return; |
2739 | } | ||
2740 | |||
2741 | /** | ||
2742 | * hal_iocpf_pvt BFA IOC PF private functions | ||
2743 | */ | ||
2053 | 2744 | ||
2054 | if (ioc->attr->nwwn == 0) | 2745 | static void |
2055 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN); | 2746 | bfa_iocpf_enable(struct bfa_ioc_s *ioc) |
2056 | if (ioc->attr->pwwn == 0) | 2747 | { |
2057 | bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN); | 2748 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); |
2058 | } | 2749 | } |
2059 | 2750 | ||
2060 | #endif | 2751 | static void |
2752 | bfa_iocpf_disable(struct bfa_ioc_s *ioc) | ||
2753 | { | ||
2754 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); | ||
2755 | } | ||
2756 | |||
2757 | static void | ||
2758 | bfa_iocpf_fail(struct bfa_ioc_s *ioc) | ||
2759 | { | ||
2760 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); | ||
2761 | } | ||
2762 | |||
2763 | static void | ||
2764 | bfa_iocpf_initfail(struct bfa_ioc_s *ioc) | ||
2765 | { | ||
2766 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); | ||
2767 | } | ||
2768 | |||
2769 | static void | ||
2770 | bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc) | ||
2771 | { | ||
2772 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); | ||
2773 | } | ||
2774 | |||
2775 | static void | ||
2776 | bfa_iocpf_stop(struct bfa_ioc_s *ioc) | ||
2777 | { | ||
2778 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); | ||
2779 | } | ||
2780 | |||
2781 | static void | ||
2782 | bfa_iocpf_timeout(void *ioc_arg) | ||
2783 | { | ||
2784 | struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; | ||
2785 | |||
2786 | bfa_trc(ioc, 0); | ||
2787 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); | ||
2788 | } | ||
2789 | |||
2790 | static void | ||
2791 | bfa_iocpf_sem_timeout(void *ioc_arg) | ||
2792 | { | ||
2793 | struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; | ||
2794 | |||
2795 | bfa_ioc_hw_sem_get(ioc); | ||
2796 | } | ||
2797 | |||
2798 | /** | ||
2799 | * bfa timer function | ||
2800 | */ | ||
2801 | void | ||
2802 | bfa_timer_init(struct bfa_timer_mod_s *mod) | ||
2803 | { | ||
2804 | INIT_LIST_HEAD(&mod->timer_q); | ||
2805 | } | ||
2806 | |||
2807 | void | ||
2808 | bfa_timer_beat(struct bfa_timer_mod_s *mod) | ||
2809 | { | ||
2810 | struct list_head *qh = &mod->timer_q; | ||
2811 | struct list_head *qe, *qe_next; | ||
2812 | struct bfa_timer_s *elem; | ||
2813 | struct list_head timedout_q; | ||
2814 | |||
2815 | INIT_LIST_HEAD(&timedout_q); | ||
2816 | |||
2817 | qe = bfa_q_next(qh); | ||
2818 | |||
2819 | while (qe != qh) { | ||
2820 | qe_next = bfa_q_next(qe); | ||
2821 | |||
2822 | elem = (struct bfa_timer_s *) qe; | ||
2823 | if (elem->timeout <= BFA_TIMER_FREQ) { | ||
2824 | elem->timeout = 0; | ||
2825 | list_del(&elem->qe); | ||
2826 | list_add_tail(&elem->qe, &timedout_q); | ||
2827 | } else { | ||
2828 | elem->timeout -= BFA_TIMER_FREQ; | ||
2829 | } | ||
2830 | |||
2831 | qe = qe_next; /* go to next elem */ | ||
2832 | } | ||
2833 | |||
2834 | /* | ||
2835 | * Pop all the timeout entries | ||
2836 | */ | ||
2837 | while (!list_empty(&timedout_q)) { | ||
2838 | bfa_q_deq(&timedout_q, &elem); | ||
2839 | elem->timercb(elem->arg); | ||
2840 | } | ||
2841 | } | ||
2842 | |||
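bfa_timer_beat uses a two-phase expiry: the first pass only ages entries and unlinks the expired ones onto a private list, and the callbacks fire in a second pass, so a callback can safely re-arm or stop timers without disturbing the list walk. A generic standalone version of that pattern, with an assumed tick period standing in for BFA_TIMER_FREQ:

/* Generic two-phase timer beat; names and TICK_MS are illustrative. */
#include <stddef.h>

#define TICK_MS 100u                               /* assumed beat period */

struct soft_timer {
	struct soft_timer *next;
	unsigned int timeout_ms;
	void (*cb)(void *arg);
	void *arg;
};

static void timer_beat(struct soft_timer **head)
{
	struct soft_timer *expired = NULL, **pp = head;

	/* Phase 1: age every timer, unlink expired ones onto a local list. */
	while (*pp) {
		struct soft_timer *t = *pp;

		if (t->timeout_ms <= TICK_MS) {
			*pp = t->next;             /* unlink from the active list */
			t->next = expired;
			expired = t;
		} else {
			t->timeout_ms -= TICK_MS;
			pp = &t->next;
		}
	}

	/* Phase 2: fire callbacks after the scan is complete. */
	while (expired) {
		struct soft_timer *t = expired;

		expired = expired->next;
		t->cb(t->arg);
	}
}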
2843 | /** | ||
2844 | * Should be called with lock protection | ||
2845 | */ | ||
2846 | void | ||
2847 | bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, | ||
2848 | void (*timercb) (void *), void *arg, unsigned int timeout) | ||
2849 | { | ||
2850 | |||
2851 | bfa_assert(timercb != NULL); | ||
2852 | bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer)); | ||
2853 | |||
2854 | timer->timeout = timeout; | ||
2855 | timer->timercb = timercb; | ||
2856 | timer->arg = arg; | ||
2857 | |||
2858 | list_add_tail(&timer->qe, &mod->timer_q); | ||
2859 | } | ||
2860 | |||
2861 | /** | ||
2862 | * Should be called with lock protection | ||
2863 | */ | ||
2864 | void | ||
2865 | bfa_timer_stop(struct bfa_timer_s *timer) | ||
2866 | { | ||
2867 | bfa_assert(!list_empty(&timer->qe)); | ||
2868 | |||
2869 | list_del(&timer->qe); | ||
2870 | } | ||