author    | Krishna Gudipati <kgudipat@brocade.com>   | 2010-09-15 14:50:55 -0400
committer | James Bottomley <James.Bottomley@suse.de> | 2010-09-16 22:54:24 -0400
commit    | a36c61f9025b8924f99f54d518763bee7aa84085
tree      | e1d58c382ad31fe1ad3c5f6bccde1b9df1d9fd8c /drivers/scsi/bfa/bfa_core.c
parent    | edaed859e63aac174fcc3fed81886b91bb124661
[SCSI] bfa: cleanup driver
We have flattened the BFA hierarchy and reduced the number of source and
header files compared to the earlier driver.
Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/bfa/bfa_core.c')
-rw-r--r-- | drivers/scsi/bfa/bfa_core.c | 1131
1 file changed, 1048 insertions, 83 deletions
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 76fa5c5b40d..c2fa07f2485 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. | 2 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. |
3 | * All rights reserved | 3 | * All rights reserved |
4 | * www.brocade.com | 4 | * www.brocade.com |
5 | * | 5 | * |
@@ -15,27 +15,992 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <bfa.h> | 18 | #include "bfa_modules.h" |
19 | #include <defs/bfa_defs_pci.h> | 19 | #include "bfi_ctreg.h" |
20 | #include <cs/bfa_debug.h> | 20 | #include "bfad_drv.h" |
21 | #include <bfa_iocfc.h> | ||
22 | 21 | ||
23 | #define DEF_CFG_NUM_FABRICS 1 | 22 | BFA_TRC_FILE(HAL, CORE); |
24 | #define DEF_CFG_NUM_LPORTS 256 | ||
25 | #define DEF_CFG_NUM_CQS 4 | ||
26 | #define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX) | ||
27 | #define DEF_CFG_NUM_TSKIM_REQS 128 | ||
28 | #define DEF_CFG_NUM_FCXP_REQS 64 | ||
29 | #define DEF_CFG_NUM_UF_BUFS 64 | ||
30 | #define DEF_CFG_NUM_RPORTS 1024 | ||
31 | #define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS) | ||
32 | #define DEF_CFG_NUM_TINS 256 | ||
33 | 23 | ||
34 | #define DEF_CFG_NUM_SGPGS 2048 | 24 | /** |
35 | #define DEF_CFG_NUM_REQQ_ELEMS 256 | 25 | * BFA IOC FC related definitions |
36 | #define DEF_CFG_NUM_RSPQ_ELEMS 64 | 26 | */ |
37 | #define DEF_CFG_NUM_SBOOT_TGTS 16 | 27 | |
38 | #define DEF_CFG_NUM_SBOOT_LUNS 16 | 28 | /** |
29 | * IOC local definitions | ||
30 | */ | ||
31 | #define BFA_IOCFC_TOV 5000 /* msecs */ | ||
32 | |||
33 | enum { | ||
34 | BFA_IOCFC_ACT_NONE = 0, | ||
35 | BFA_IOCFC_ACT_INIT = 1, | ||
36 | BFA_IOCFC_ACT_STOP = 2, | ||
37 | BFA_IOCFC_ACT_DISABLE = 3, | ||
38 | }; | ||
39 | |||
40 | #define DEF_CFG_NUM_FABRICS 1 | ||
41 | #define DEF_CFG_NUM_LPORTS 256 | ||
42 | #define DEF_CFG_NUM_CQS 4 | ||
43 | #define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX) | ||
44 | #define DEF_CFG_NUM_TSKIM_REQS 128 | ||
45 | #define DEF_CFG_NUM_FCXP_REQS 64 | ||
46 | #define DEF_CFG_NUM_UF_BUFS 64 | ||
47 | #define DEF_CFG_NUM_RPORTS 1024 | ||
48 | #define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS) | ||
49 | #define DEF_CFG_NUM_TINS 256 | ||
50 | |||
51 | #define DEF_CFG_NUM_SGPGS 2048 | ||
52 | #define DEF_CFG_NUM_REQQ_ELEMS 256 | ||
53 | #define DEF_CFG_NUM_RSPQ_ELEMS 64 | ||
54 | #define DEF_CFG_NUM_SBOOT_TGTS 16 | ||
55 | #define DEF_CFG_NUM_SBOOT_LUNS 16 | ||
56 | |||
57 | /** | ||
58 | * forward declaration for IOC FC functions | ||
59 | */ | ||
60 | static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); | ||
61 | static void bfa_iocfc_disable_cbfn(void *bfa_arg); | ||
62 | static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); | ||
63 | static void bfa_iocfc_reset_cbfn(void *bfa_arg); | ||
64 | static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; | ||
65 | |||
66 | /** | ||
67 | * BFA Interrupt handling functions | ||
68 | */ | ||
69 | static void | ||
70 | bfa_msix_errint(struct bfa_s *bfa, u32 intr) | ||
71 | { | ||
72 | bfa_ioc_error_isr(&bfa->ioc); | ||
73 | } | ||
74 | |||
75 | static void | ||
76 | bfa_msix_lpu(struct bfa_s *bfa) | ||
77 | { | ||
78 | bfa_ioc_mbox_isr(&bfa->ioc); | ||
79 | } | ||
80 | |||
81 | static void | ||
82 | bfa_reqq_resume(struct bfa_s *bfa, int qid) | ||
83 | { | ||
84 | struct list_head *waitq, *qe, *qen; | ||
85 | struct bfa_reqq_wait_s *wqe; | ||
86 | |||
87 | waitq = bfa_reqq(bfa, qid); | ||
88 | list_for_each_safe(qe, qen, waitq) { | ||
89 | /** | ||
90 | * Callback only as long as there is room in request queue | ||
91 | */ | ||
92 | if (bfa_reqq_full(bfa, qid)) | ||
93 | break; | ||
94 | |||
95 | list_del(qe); | ||
96 | wqe = (struct bfa_reqq_wait_s *) qe; | ||
97 | wqe->qresume(wqe->cbarg); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | void | ||
102 | bfa_msix_all(struct bfa_s *bfa, int vec) | ||
103 | { | ||
104 | bfa_intx(bfa); | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * hal_intr_api | ||
109 | */ | ||
110 | bfa_boolean_t | ||
111 | bfa_intx(struct bfa_s *bfa) | ||
112 | { | ||
113 | u32 intr, qintr; | ||
114 | int queue; | ||
115 | |||
116 | intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); | ||
117 | if (!intr) | ||
118 | return BFA_FALSE; | ||
119 | |||
120 | /** | ||
121 | * RME completion queue interrupt | ||
122 | */ | ||
123 | qintr = intr & __HFN_INT_RME_MASK; | ||
124 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); | ||
125 | |||
126 | for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { | ||
127 | if (intr & (__HFN_INT_RME_Q0 << queue)) | ||
128 | bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); | ||
129 | } | ||
130 | intr &= ~qintr; | ||
131 | if (!intr) | ||
132 | return BFA_TRUE; | ||
133 | |||
134 | /** | ||
135 | * CPE completion queue interrupt | ||
136 | */ | ||
137 | qintr = intr & __HFN_INT_CPE_MASK; | ||
138 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); | ||
139 | |||
140 | for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { | ||
141 | if (intr & (__HFN_INT_CPE_Q0 << queue)) | ||
142 | bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); | ||
143 | } | ||
144 | intr &= ~qintr; | ||
145 | if (!intr) | ||
146 | return BFA_TRUE; | ||
147 | |||
148 | bfa_msix_lpu_err(bfa, intr); | ||
149 | |||
150 | return BFA_TRUE; | ||
151 | } | ||
152 | |||
153 | void | ||
154 | bfa_intx_enable(struct bfa_s *bfa) | ||
155 | { | ||
156 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask); | ||
157 | } | ||
158 | |||
159 | void | ||
160 | bfa_intx_disable(struct bfa_s *bfa) | ||
161 | { | ||
162 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); | ||
163 | } | ||
164 | |||
165 | void | ||
166 | bfa_isr_enable(struct bfa_s *bfa) | ||
167 | { | ||
168 | u32 intr_unmask; | ||
169 | int pci_func = bfa_ioc_pcifn(&bfa->ioc); | ||
170 | |||
171 | bfa_trc(bfa, pci_func); | ||
172 | |||
173 | bfa_msix_install(bfa); | ||
174 | intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | | ||
175 | __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | | ||
176 | __HFN_INT_LL_HALT); | ||
177 | |||
178 | if (pci_func == 0) | ||
179 | intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | | ||
180 | __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | | ||
181 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | | ||
182 | __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | | ||
183 | __HFN_INT_MBOX_LPU0); | ||
184 | else | ||
185 | intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | | ||
186 | __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | | ||
187 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | | ||
188 | __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | | ||
189 | __HFN_INT_MBOX_LPU1); | ||
190 | |||
191 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask); | ||
192 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask); | ||
193 | bfa->iocfc.intr_mask = ~intr_unmask; | ||
194 | bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); | ||
195 | } | ||
196 | |||
197 | void | ||
198 | bfa_isr_disable(struct bfa_s *bfa) | ||
199 | { | ||
200 | bfa_isr_mode_set(bfa, BFA_FALSE); | ||
201 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); | ||
202 | bfa_msix_uninstall(bfa); | ||
203 | } | ||
204 | |||
205 | void | ||
206 | bfa_msix_reqq(struct bfa_s *bfa, int qid) | ||
207 | { | ||
208 | struct list_head *waitq; | ||
209 | |||
210 | qid &= (BFI_IOC_MAX_CQS - 1); | ||
211 | |||
212 | bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); | ||
213 | |||
214 | /** | ||
215 | * Resume any pending requests in the corresponding reqq. | ||
216 | */ | ||
217 | waitq = bfa_reqq(bfa, qid); | ||
218 | if (!list_empty(waitq)) | ||
219 | bfa_reqq_resume(bfa, qid); | ||
220 | } | ||
221 | |||
222 | void | ||
223 | bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) | ||
224 | { | ||
225 | bfa_trc(bfa, m->mhdr.msg_class); | ||
226 | bfa_trc(bfa, m->mhdr.msg_id); | ||
227 | bfa_trc(bfa, m->mhdr.mtag.i2htok); | ||
228 | bfa_assert(0); | ||
229 | bfa_trc_stop(bfa->trcmod); | ||
230 | } | ||
231 | |||
232 | void | ||
233 | bfa_msix_rspq(struct bfa_s *bfa, int qid) | ||
234 | { | ||
235 | struct bfi_msg_s *m; | ||
236 | u32 pi, ci; | ||
237 | struct list_head *waitq; | ||
238 | |||
239 | bfa_trc_fp(bfa, qid); | ||
240 | |||
241 | qid &= (BFI_IOC_MAX_CQS - 1); | ||
242 | |||
243 | bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); | ||
244 | |||
245 | ci = bfa_rspq_ci(bfa, qid); | ||
246 | pi = bfa_rspq_pi(bfa, qid); | ||
247 | |||
248 | bfa_trc_fp(bfa, ci); | ||
249 | bfa_trc_fp(bfa, pi); | ||
250 | |||
251 | if (bfa->rme_process) { | ||
252 | while (ci != pi) { | ||
253 | m = bfa_rspq_elem(bfa, qid, ci); | ||
254 | bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX); | ||
255 | |||
256 | bfa_isrs[m->mhdr.msg_class] (bfa, m); | ||
257 | |||
258 | CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | /** | ||
263 | * update CI | ||
264 | */ | ||
265 | bfa_rspq_ci(bfa, qid) = pi; | ||
266 | bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi); | ||
267 | mmiowb(); | ||
268 | |||
269 | /** | ||
270 | * Resume any pending requests in the corresponding reqq. | ||
271 | */ | ||
272 | waitq = bfa_reqq(bfa, qid); | ||
273 | if (!list_empty(waitq)) | ||
274 | bfa_reqq_resume(bfa, qid); | ||
275 | } | ||
276 | |||
277 | void | ||
278 | bfa_msix_lpu_err(struct bfa_s *bfa, int vec) | ||
279 | { | ||
280 | u32 intr, curr_value; | ||
281 | |||
282 | intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); | ||
283 | |||
284 | if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) | ||
285 | bfa_msix_lpu(bfa); | ||
286 | |||
287 | intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | | ||
288 | __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); | ||
289 | |||
290 | if (intr) { | ||
291 | if (intr & __HFN_INT_LL_HALT) { | ||
292 | /** | ||
293 | * If LL_HALT bit is set then FW Init Halt LL Port | ||
294 | * Register needs to be cleared as well so Interrupt | ||
295 | * Status Register will be cleared. | ||
296 | */ | ||
297 | curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt); | ||
298 | curr_value &= ~__FW_INIT_HALT_P; | ||
299 | bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value); | ||
300 | } | ||
301 | |||
302 | if (intr & __HFN_INT_ERR_PSS) { | ||
303 | /** | ||
304 | * ERR_PSS bit needs to be cleared as well in case | ||
305 | * interrups are shared so driver's interrupt handler is | ||
306 | * still called eventhough it is already masked out. | ||
307 | */ | ||
308 | curr_value = bfa_reg_read( | ||
309 | bfa->ioc.ioc_regs.pss_err_status_reg); | ||
310 | curr_value &= __PSS_ERR_STATUS_SET; | ||
311 | bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg, | ||
312 | curr_value); | ||
313 | } | ||
314 | |||
315 | bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr); | ||
316 | bfa_msix_errint(bfa, intr); | ||
317 | } | ||
318 | } | ||
319 | |||
320 | void | ||
321 | bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func) | ||
322 | { | ||
323 | bfa_isrs[mc] = isr_func; | ||
324 | } | ||
325 | |||
326 | /** | ||
327 | * BFA IOC FC related functions | ||
328 | */ | ||
329 | |||
330 | /** | ||
331 | * hal_ioc_pvt BFA IOC private functions | ||
332 | */ | ||
333 | |||
334 | static void | ||
335 | bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) | ||
336 | { | ||
337 | int i, per_reqq_sz, per_rspq_sz; | ||
338 | |||
339 | per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), | ||
340 | BFA_DMA_ALIGN_SZ); | ||
341 | per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), | ||
342 | BFA_DMA_ALIGN_SZ); | ||
343 | |||
344 | /* | ||
345 | * Calculate CQ size | ||
346 | */ | ||
347 | for (i = 0; i < cfg->fwcfg.num_cqs; i++) { | ||
348 | *dm_len = *dm_len + per_reqq_sz; | ||
349 | *dm_len = *dm_len + per_rspq_sz; | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * Calculate Shadow CI/PI size | ||
354 | */ | ||
355 | for (i = 0; i < cfg->fwcfg.num_cqs; i++) | ||
356 | *dm_len += (2 * BFA_CACHELINE_SZ); | ||
357 | } | ||
358 | |||
359 | static void | ||
360 | bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) | ||
361 | { | ||
362 | *dm_len += | ||
363 | BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); | ||
364 | *dm_len += | ||
365 | BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), | ||
366 | BFA_CACHELINE_SZ); | ||
367 | } | ||
368 | |||
369 | /** | ||
370 | * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ | ||
371 | */ | ||
372 | static void | ||
373 | bfa_iocfc_send_cfg(void *bfa_arg) | ||
374 | { | ||
375 | struct bfa_s *bfa = bfa_arg; | ||
376 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
377 | struct bfi_iocfc_cfg_req_s cfg_req; | ||
378 | struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; | ||
379 | struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; | ||
380 | int i; | ||
381 | |||
382 | bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS); | ||
383 | bfa_trc(bfa, cfg->fwcfg.num_cqs); | ||
384 | |||
385 | bfa_iocfc_reset_queues(bfa); | ||
386 | |||
387 | /** | ||
388 | * initialize IOC configuration info | ||
389 | */ | ||
390 | cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; | ||
391 | cfg_info->num_cqs = cfg->fwcfg.num_cqs; | ||
392 | |||
393 | bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); | ||
394 | /** | ||
395 | * dma map REQ and RSP circular queues and shadow pointers | ||
396 | */ | ||
397 | for (i = 0; i < cfg->fwcfg.num_cqs; i++) { | ||
398 | bfa_dma_be_addr_set(cfg_info->req_cq_ba[i], | ||
399 | iocfc->req_cq_ba[i].pa); | ||
400 | bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], | ||
401 | iocfc->req_cq_shadow_ci[i].pa); | ||
402 | cfg_info->req_cq_elems[i] = | ||
403 | bfa_os_htons(cfg->drvcfg.num_reqq_elems); | ||
404 | |||
405 | bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], | ||
406 | iocfc->rsp_cq_ba[i].pa); | ||
407 | bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], | ||
408 | iocfc->rsp_cq_shadow_pi[i].pa); | ||
409 | cfg_info->rsp_cq_elems[i] = | ||
410 | bfa_os_htons(cfg->drvcfg.num_rspq_elems); | ||
411 | } | ||
412 | |||
413 | /** | ||
414 | * Enable interrupt coalescing if it is driver init path | ||
415 | * and not ioc disable/enable path. | ||
416 | */ | ||
417 | if (!iocfc->cfgdone) | ||
418 | cfg_info->intr_attr.coalesce = BFA_TRUE; | ||
419 | |||
420 | iocfc->cfgdone = BFA_FALSE; | ||
421 | |||
422 | /** | ||
423 | * dma map IOC configuration itself | ||
424 | */ | ||
425 | bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, | ||
426 | bfa_lpuid(bfa)); | ||
427 | bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); | ||
428 | |||
429 | bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, | ||
430 | sizeof(struct bfi_iocfc_cfg_req_s)); | ||
431 | } | ||
432 | |||
433 | static void | ||
434 | bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | ||
435 | struct bfa_pcidev_s *pcidev) | ||
436 | { | ||
437 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
438 | |||
439 | bfa->bfad = bfad; | ||
440 | iocfc->bfa = bfa; | ||
441 | iocfc->action = BFA_IOCFC_ACT_NONE; | ||
442 | |||
443 | bfa_os_assign(iocfc->cfg, *cfg); | ||
444 | |||
445 | /** | ||
446 | * Initialize chip specific handlers. | ||
447 | */ | ||
448 | if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { | ||
449 | iocfc->hwif.hw_reginit = bfa_hwct_reginit; | ||
450 | iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; | ||
451 | iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; | ||
452 | iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; | ||
453 | iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; | ||
454 | iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; | ||
455 | iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; | ||
456 | iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; | ||
457 | iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range; | ||
458 | } else { | ||
459 | iocfc->hwif.hw_reginit = bfa_hwcb_reginit; | ||
460 | iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; | ||
461 | iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; | ||
462 | iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; | ||
463 | iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; | ||
464 | iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; | ||
465 | iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; | ||
466 | iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; | ||
467 | iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range; | ||
468 | } | ||
469 | |||
470 | iocfc->hwif.hw_reginit(bfa); | ||
471 | bfa->msix.nvecs = 0; | ||
472 | } | ||
473 | |||
474 | static void | ||
475 | bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, | ||
476 | struct bfa_meminfo_s *meminfo) | ||
477 | { | ||
478 | u8 *dm_kva; | ||
479 | u64 dm_pa; | ||
480 | int i, per_reqq_sz, per_rspq_sz; | ||
481 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
482 | int dbgsz; | ||
483 | |||
484 | dm_kva = bfa_meminfo_dma_virt(meminfo); | ||
485 | dm_pa = bfa_meminfo_dma_phys(meminfo); | ||
486 | |||
487 | /* | ||
488 | * First allocate dma memory for IOC. | ||
489 | */ | ||
490 | bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa); | ||
491 | dm_kva += bfa_ioc_meminfo(); | ||
492 | dm_pa += bfa_ioc_meminfo(); | ||
493 | |||
494 | /* | ||
495 | * Claim DMA-able memory for the request/response queues and for shadow | ||
496 | * ci/pi registers | ||
497 | */ | ||
498 | per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), | ||
499 | BFA_DMA_ALIGN_SZ); | ||
500 | per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), | ||
501 | BFA_DMA_ALIGN_SZ); | ||
502 | |||
503 | for (i = 0; i < cfg->fwcfg.num_cqs; i++) { | ||
504 | iocfc->req_cq_ba[i].kva = dm_kva; | ||
505 | iocfc->req_cq_ba[i].pa = dm_pa; | ||
506 | bfa_os_memset(dm_kva, 0, per_reqq_sz); | ||
507 | dm_kva += per_reqq_sz; | ||
508 | dm_pa += per_reqq_sz; | ||
509 | |||
510 | iocfc->rsp_cq_ba[i].kva = dm_kva; | ||
511 | iocfc->rsp_cq_ba[i].pa = dm_pa; | ||
512 | bfa_os_memset(dm_kva, 0, per_rspq_sz); | ||
513 | dm_kva += per_rspq_sz; | ||
514 | dm_pa += per_rspq_sz; | ||
515 | } | ||
516 | |||
517 | for (i = 0; i < cfg->fwcfg.num_cqs; i++) { | ||
518 | iocfc->req_cq_shadow_ci[i].kva = dm_kva; | ||
519 | iocfc->req_cq_shadow_ci[i].pa = dm_pa; | ||
520 | dm_kva += BFA_CACHELINE_SZ; | ||
521 | dm_pa += BFA_CACHELINE_SZ; | ||
522 | |||
523 | iocfc->rsp_cq_shadow_pi[i].kva = dm_kva; | ||
524 | iocfc->rsp_cq_shadow_pi[i].pa = dm_pa; | ||
525 | dm_kva += BFA_CACHELINE_SZ; | ||
526 | dm_pa += BFA_CACHELINE_SZ; | ||
527 | } | ||
528 | |||
529 | /* | ||
530 | * Claim DMA-able memory for the config info page | ||
531 | */ | ||
532 | bfa->iocfc.cfg_info.kva = dm_kva; | ||
533 | bfa->iocfc.cfg_info.pa = dm_pa; | ||
534 | bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva; | ||
535 | dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); | ||
536 | dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); | ||
537 | |||
538 | /* | ||
539 | * Claim DMA-able memory for the config response | ||
540 | */ | ||
541 | bfa->iocfc.cfgrsp_dma.kva = dm_kva; | ||
542 | bfa->iocfc.cfgrsp_dma.pa = dm_pa; | ||
543 | bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; | ||
544 | |||
545 | dm_kva += | ||
546 | BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), | ||
547 | BFA_CACHELINE_SZ); | ||
548 | dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), | ||
549 | BFA_CACHELINE_SZ); | ||
550 | |||
551 | |||
552 | bfa_meminfo_dma_virt(meminfo) = dm_kva; | ||
553 | bfa_meminfo_dma_phys(meminfo) = dm_pa; | ||
554 | |||
555 | dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover); | ||
556 | if (dbgsz > 0) { | ||
557 | bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); | ||
558 | bfa_meminfo_kva(meminfo) += dbgsz; | ||
559 | } | ||
560 | } | ||
561 | |||
562 | /** | ||
563 | * Start BFA submodules. | ||
564 | */ | ||
565 | static void | ||
566 | bfa_iocfc_start_submod(struct bfa_s *bfa) | ||
567 | { | ||
568 | int i; | ||
569 | |||
570 | bfa->rme_process = BFA_TRUE; | ||
571 | |||
572 | for (i = 0; hal_mods[i]; i++) | ||
573 | hal_mods[i]->start(bfa); | ||
574 | } | ||
575 | |||
576 | /** | ||
577 | * Disable BFA submodules. | ||
578 | */ | ||
579 | static void | ||
580 | bfa_iocfc_disable_submod(struct bfa_s *bfa) | ||
581 | { | ||
582 | int i; | ||
583 | |||
584 | for (i = 0; hal_mods[i]; i++) | ||
585 | hal_mods[i]->iocdisable(bfa); | ||
586 | } | ||
587 | |||
588 | static void | ||
589 | bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) | ||
590 | { | ||
591 | struct bfa_s *bfa = bfa_arg; | ||
592 | |||
593 | if (complete) { | ||
594 | if (bfa->iocfc.cfgdone) | ||
595 | bfa_cb_init(bfa->bfad, BFA_STATUS_OK); | ||
596 | else | ||
597 | bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); | ||
598 | } else { | ||
599 | if (bfa->iocfc.cfgdone) | ||
600 | bfa->iocfc.action = BFA_IOCFC_ACT_NONE; | ||
601 | } | ||
602 | } | ||
603 | |||
604 | static void | ||
605 | bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) | ||
606 | { | ||
607 | struct bfa_s *bfa = bfa_arg; | ||
608 | struct bfad_s *bfad = bfa->bfad; | ||
609 | |||
610 | if (compl) | ||
611 | complete(&bfad->comp); | ||
612 | else | ||
613 | bfa->iocfc.action = BFA_IOCFC_ACT_NONE; | ||
614 | } | ||
615 | |||
616 | static void | ||
617 | bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) | ||
618 | { | ||
619 | struct bfa_s *bfa = bfa_arg; | ||
620 | struct bfad_s *bfad = bfa->bfad; | ||
621 | |||
622 | if (compl) | ||
623 | complete(&bfad->disable_comp); | ||
624 | } | ||
625 | |||
626 | /** | ||
627 | * Update BFA configuration from firmware configuration. | ||
628 | */ | ||
629 | static void | ||
630 | bfa_iocfc_cfgrsp(struct bfa_s *bfa) | ||
631 | { | ||
632 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
633 | struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; | ||
634 | struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; | ||
635 | |||
636 | fwcfg->num_cqs = fwcfg->num_cqs; | ||
637 | fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs); | ||
638 | fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs); | ||
639 | fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs); | ||
640 | fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs); | ||
641 | fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports); | ||
642 | |||
643 | iocfc->cfgdone = BFA_TRUE; | ||
644 | |||
645 | /** | ||
646 | * Configuration is complete - initialize/start submodules | ||
647 | */ | ||
648 | bfa_fcport_init(bfa); | ||
649 | |||
650 | if (iocfc->action == BFA_IOCFC_ACT_INIT) | ||
651 | bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); | ||
652 | else | ||
653 | bfa_iocfc_start_submod(bfa); | ||
654 | } | ||
655 | void | ||
656 | bfa_iocfc_reset_queues(struct bfa_s *bfa) | ||
657 | { | ||
658 | int q; | ||
659 | |||
660 | for (q = 0; q < BFI_IOC_MAX_CQS; q++) { | ||
661 | bfa_reqq_ci(bfa, q) = 0; | ||
662 | bfa_reqq_pi(bfa, q) = 0; | ||
663 | bfa_rspq_ci(bfa, q) = 0; | ||
664 | bfa_rspq_pi(bfa, q) = 0; | ||
665 | } | ||
666 | } | ||
667 | |||
668 | /** | ||
669 | * IOC enable request is complete | ||
670 | */ | ||
671 | static void | ||
672 | bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) | ||
673 | { | ||
674 | struct bfa_s *bfa = bfa_arg; | ||
675 | |||
676 | if (status != BFA_STATUS_OK) { | ||
677 | bfa_isr_disable(bfa); | ||
678 | if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) | ||
679 | bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, | ||
680 | bfa_iocfc_init_cb, bfa); | ||
681 | return; | ||
682 | } | ||
683 | |||
684 | bfa_iocfc_send_cfg(bfa); | ||
685 | } | ||
686 | |||
687 | /** | ||
688 | * IOC disable request is complete | ||
689 | */ | ||
690 | static void | ||
691 | bfa_iocfc_disable_cbfn(void *bfa_arg) | ||
692 | { | ||
693 | struct bfa_s *bfa = bfa_arg; | ||
694 | |||
695 | bfa_isr_disable(bfa); | ||
696 | bfa_iocfc_disable_submod(bfa); | ||
697 | |||
698 | if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP) | ||
699 | bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, | ||
700 | bfa); | ||
701 | else { | ||
702 | bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE); | ||
703 | bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, | ||
704 | bfa); | ||
705 | } | ||
706 | } | ||
707 | |||
708 | /** | ||
709 | * Notify sub-modules of hardware failure. | ||
710 | */ | ||
711 | static void | ||
712 | bfa_iocfc_hbfail_cbfn(void *bfa_arg) | ||
713 | { | ||
714 | struct bfa_s *bfa = bfa_arg; | ||
715 | |||
716 | bfa->rme_process = BFA_FALSE; | ||
717 | |||
718 | bfa_isr_disable(bfa); | ||
719 | bfa_iocfc_disable_submod(bfa); | ||
720 | |||
721 | if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) | ||
722 | bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, | ||
723 | bfa); | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * Actions on chip-reset completion. | ||
728 | */ | ||
729 | static void | ||
730 | bfa_iocfc_reset_cbfn(void *bfa_arg) | ||
731 | { | ||
732 | struct bfa_s *bfa = bfa_arg; | ||
733 | |||
734 | bfa_iocfc_reset_queues(bfa); | ||
735 | bfa_isr_enable(bfa); | ||
736 | } | ||
737 | |||
738 | /** | ||
739 | * hal_ioc_public | ||
740 | */ | ||
741 | |||
742 | /** | ||
743 | * Query IOC memory requirement information. | ||
744 | */ | ||
745 | void | ||
746 | bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, | ||
747 | u32 *dm_len) | ||
748 | { | ||
749 | /* dma memory for IOC */ | ||
750 | *dm_len += bfa_ioc_meminfo(); | ||
751 | |||
752 | bfa_iocfc_fw_cfg_sz(cfg, dm_len); | ||
753 | bfa_iocfc_cqs_sz(cfg, dm_len); | ||
754 | *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); | ||
755 | } | ||
756 | |||
757 | /** | ||
758 | * Query IOC memory requirement information. | ||
759 | */ | ||
760 | void | ||
761 | bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | ||
762 | struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) | ||
763 | { | ||
764 | int i; | ||
765 | struct bfa_ioc_s *ioc = &bfa->ioc; | ||
766 | |||
767 | bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn; | ||
768 | bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn; | ||
769 | bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn; | ||
770 | bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn; | ||
771 | |||
772 | ioc->trcmod = bfa->trcmod; | ||
773 | bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); | ||
774 | |||
775 | /** | ||
776 | * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. | ||
777 | */ | ||
778 | if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) | ||
779 | bfa_ioc_set_fcmode(&bfa->ioc); | ||
780 | |||
781 | bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); | ||
782 | bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); | ||
783 | |||
784 | bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); | ||
785 | bfa_iocfc_mem_claim(bfa, cfg, meminfo); | ||
786 | bfa_timer_init(&bfa->timer_mod); | ||
787 | |||
788 | INIT_LIST_HEAD(&bfa->comp_q); | ||
789 | for (i = 0; i < BFI_IOC_MAX_CQS; i++) | ||
790 | INIT_LIST_HEAD(&bfa->reqq_waitq[i]); | ||
791 | } | ||
792 | |||
793 | /** | ||
794 | * Query IOC memory requirement information. | ||
795 | */ | ||
796 | void | ||
797 | bfa_iocfc_detach(struct bfa_s *bfa) | ||
798 | { | ||
799 | bfa_ioc_detach(&bfa->ioc); | ||
800 | } | ||
801 | |||
802 | /** | ||
803 | * Query IOC memory requirement information. | ||
804 | */ | ||
805 | void | ||
806 | bfa_iocfc_init(struct bfa_s *bfa) | ||
807 | { | ||
808 | bfa->iocfc.action = BFA_IOCFC_ACT_INIT; | ||
809 | bfa_ioc_enable(&bfa->ioc); | ||
810 | } | ||
811 | |||
812 | /** | ||
813 | * IOC start called from bfa_start(). Called to start IOC operations | ||
814 | * at driver instantiation for this instance. | ||
815 | */ | ||
816 | void | ||
817 | bfa_iocfc_start(struct bfa_s *bfa) | ||
818 | { | ||
819 | if (bfa->iocfc.cfgdone) | ||
820 | bfa_iocfc_start_submod(bfa); | ||
821 | } | ||
822 | |||
823 | /** | ||
824 | * IOC stop called from bfa_stop(). Called only when driver is unloaded | ||
825 | * for this instance. | ||
826 | */ | ||
827 | void | ||
828 | bfa_iocfc_stop(struct bfa_s *bfa) | ||
829 | { | ||
830 | bfa->iocfc.action = BFA_IOCFC_ACT_STOP; | ||
831 | |||
832 | bfa->rme_process = BFA_FALSE; | ||
833 | bfa_ioc_disable(&bfa->ioc); | ||
834 | } | ||
835 | |||
836 | void | ||
837 | bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) | ||
838 | { | ||
839 | struct bfa_s *bfa = bfaarg; | ||
840 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
841 | union bfi_iocfc_i2h_msg_u *msg; | ||
842 | |||
843 | msg = (union bfi_iocfc_i2h_msg_u *) m; | ||
844 | bfa_trc(bfa, msg->mh.msg_id); | ||
845 | |||
846 | switch (msg->mh.msg_id) { | ||
847 | case BFI_IOCFC_I2H_CFG_REPLY: | ||
848 | iocfc->cfg_reply = &msg->cfg_reply; | ||
849 | bfa_iocfc_cfgrsp(bfa); | ||
850 | break; | ||
851 | case BFI_IOCFC_I2H_UPDATEQ_RSP: | ||
852 | iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); | ||
853 | break; | ||
854 | default: | ||
855 | bfa_assert(0); | ||
856 | } | ||
857 | } | ||
858 | |||
859 | void | ||
860 | bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr) | ||
861 | { | ||
862 | bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr); | ||
863 | } | ||
864 | |||
865 | u64 | ||
866 | bfa_adapter_get_id(struct bfa_s *bfa) | ||
867 | { | ||
868 | return bfa_ioc_get_adid(&bfa->ioc); | ||
869 | } | ||
870 | |||
871 | void | ||
872 | bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) | ||
873 | { | ||
874 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
875 | |||
876 | attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; | ||
877 | |||
878 | attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? | ||
879 | bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) : | ||
880 | bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay); | ||
881 | |||
882 | attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? | ||
883 | bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) : | ||
884 | bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency); | ||
885 | |||
886 | attr->config = iocfc->cfg; | ||
887 | } | ||
888 | |||
889 | bfa_status_t | ||
890 | bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) | ||
891 | { | ||
892 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
893 | struct bfi_iocfc_set_intr_req_s *m; | ||
894 | |||
895 | iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; | ||
896 | iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay); | ||
897 | iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency); | ||
898 | |||
899 | if (!bfa_iocfc_is_operational(bfa)) | ||
900 | return BFA_STATUS_OK; | ||
901 | |||
902 | m = bfa_reqq_next(bfa, BFA_REQQ_IOC); | ||
903 | if (!m) | ||
904 | return BFA_STATUS_DEVBUSY; | ||
905 | |||
906 | bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, | ||
907 | bfa_lpuid(bfa)); | ||
908 | m->coalesce = iocfc->cfginfo->intr_attr.coalesce; | ||
909 | m->delay = iocfc->cfginfo->intr_attr.delay; | ||
910 | m->latency = iocfc->cfginfo->intr_attr.latency; | ||
911 | |||
912 | bfa_trc(bfa, attr->delay); | ||
913 | bfa_trc(bfa, attr->latency); | ||
914 | |||
915 | bfa_reqq_produce(bfa, BFA_REQQ_IOC); | ||
916 | return BFA_STATUS_OK; | ||
917 | } | ||
918 | |||
919 | void | ||
920 | bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa) | ||
921 | { | ||
922 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
923 | |||
924 | iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); | ||
925 | bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); | ||
926 | } | ||
927 | /** | ||
928 | * Enable IOC after it is disabled. | ||
929 | */ | ||
930 | void | ||
931 | bfa_iocfc_enable(struct bfa_s *bfa) | ||
932 | { | ||
933 | bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, | ||
934 | "IOC Enable"); | ||
935 | bfa_ioc_enable(&bfa->ioc); | ||
936 | } | ||
937 | |||
938 | void | ||
939 | bfa_iocfc_disable(struct bfa_s *bfa) | ||
940 | { | ||
941 | bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, | ||
942 | "IOC Disable"); | ||
943 | bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; | ||
944 | |||
945 | bfa->rme_process = BFA_FALSE; | ||
946 | bfa_ioc_disable(&bfa->ioc); | ||
947 | } | ||
948 | |||
949 | |||
950 | bfa_boolean_t | ||
951 | bfa_iocfc_is_operational(struct bfa_s *bfa) | ||
952 | { | ||
953 | return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; | ||
954 | } | ||
955 | |||
956 | /** | ||
957 | * Return boot target port wwns -- read from boot information in flash. | ||
958 | */ | ||
959 | void | ||
960 | bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns) | ||
961 | { | ||
962 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
963 | struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; | ||
964 | int i; | ||
965 | |||
966 | if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) { | ||
967 | bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns); | ||
968 | *nwwns = cfgrsp->pbc_cfg.nbluns; | ||
969 | for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++) | ||
970 | wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn; | ||
971 | |||
972 | return; | ||
973 | } | ||
974 | |||
975 | *nwwns = cfgrsp->bootwwns.nwwns; | ||
976 | memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); | ||
977 | } | ||
978 | |||
979 | void | ||
980 | bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg) | ||
981 | { | ||
982 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
983 | struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; | ||
984 | |||
985 | pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; | ||
986 | pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; | ||
987 | pbcfg->speed = cfgrsp->pbc_cfg.port_speed; | ||
988 | memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); | ||
989 | } | ||
990 | |||
991 | int | ||
992 | bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) | ||
993 | { | ||
994 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
995 | struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; | ||
996 | |||
997 | memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport)); | ||
998 | return cfgrsp->pbc_cfg.nvports; | ||
999 | } | ||
1000 | |||
1001 | /** | ||
1002 | * hal_api | ||
1003 | */ | ||
39 | 1004 | ||
40 | /** | 1005 | /** |
41 | * Use this function query the memory requirement of the BFA library. | 1006 | * Use this function query the memory requirement of the BFA library. |
@@ -45,16 +1010,16 @@ | |||
45 | * This call will fail, if the cap is out of range compared to pre-defined | 1010 | * This call will fail, if the cap is out of range compared to pre-defined |
46 | * values within the BFA library | 1011 | * values within the BFA library |
47 | * | 1012 | * |
48 | * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate | 1013 | * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate |
49 | * its configuration in this structure. | 1014 | * its configuration in this structure. |
50 | * The default values for struct bfa_iocfc_cfg_s can be | 1015 | * The default values for struct bfa_iocfc_cfg_s can be |
51 | * fetched using bfa_cfg_get_default() API. | 1016 | * fetched using bfa_cfg_get_default() API. |
52 | * | 1017 | * |
53 | * If cap's boundary check fails, the library will use | 1018 | * If cap's boundary check fails, the library will use |
54 | * the default bfa_cap_t values (and log a warning msg). | 1019 | * the default bfa_cap_t values (and log a warning msg). |
55 | * | 1020 | * |
56 | * @param[out] meminfo - pointer to bfa_meminfo_t. This content | 1021 | * @param[out] meminfo - pointer to bfa_meminfo_t. This content |
57 | * indicates the memory type (see bfa_mem_type_t) and | 1022 | * indicates the memory type (see bfa_mem_type_t) and |
58 | * amount of memory required. | 1023 | * amount of memory required. |
59 | * | 1024 | * |
60 | * Driver should allocate the memory, populate the | 1025 | * Driver should allocate the memory, populate the |
@@ -68,8 +1033,8 @@ | |||
68 | void | 1033 | void |
69 | bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) | 1034 | bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) |
70 | { | 1035 | { |
71 | int i; | 1036 | int i; |
72 | u32 km_len = 0, dm_len = 0; | 1037 | u32 km_len = 0, dm_len = 0; |
73 | 1038 | ||
74 | bfa_assert((cfg != NULL) && (meminfo != NULL)); | 1039 | bfa_assert((cfg != NULL) && (meminfo != NULL)); |
75 | 1040 | ||
@@ -90,26 +1055,6 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) | |||
90 | meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; | 1055 | meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; |
91 | } | 1056 | } |
92 | 1057 | ||
93 | static void | ||
94 | bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) | ||
95 | { | ||
96 | struct bfa_port_s *port = &bfa->modules.port; | ||
97 | uint32_t dm_len; | ||
98 | uint8_t *dm_kva; | ||
99 | uint64_t dm_pa; | ||
100 | |||
101 | dm_len = bfa_port_meminfo(); | ||
102 | dm_kva = bfa_meminfo_dma_virt(mi); | ||
103 | dm_pa = bfa_meminfo_dma_phys(mi); | ||
104 | |||
105 | memset(port, 0, sizeof(struct bfa_port_s)); | ||
106 | bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm); | ||
107 | bfa_port_mem_claim(port, dm_kva, dm_pa); | ||
108 | |||
109 | bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; | ||
110 | bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; | ||
111 | } | ||
112 | |||
113 | /** | 1058 | /** |
114 | * Use this function to do attach the driver instance with the BFA | 1059 | * Use this function to do attach the driver instance with the BFA |
115 | * library. This function will not trigger any HW initialization | 1060 | * library. This function will not trigger any HW initialization |
@@ -119,14 +1064,14 @@ bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) | |||
119 | * pre-defined values within the BFA library | 1064 | * pre-defined values within the BFA library |
120 | * | 1065 | * |
121 | * @param[out] bfa Pointer to bfa_t. | 1066 | * @param[out] bfa Pointer to bfa_t. |
122 | * @param[in] bfad Opaque handle back to the driver's IOC structure | 1067 | * @param[in] bfad Opaque handle back to the driver's IOC structure |
123 | * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure | 1068 | * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure |
124 | * that was used in bfa_cfg_get_meminfo(). | 1069 | * that was used in bfa_cfg_get_meminfo(). |
125 | * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should | 1070 | * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should |
126 | * use the bfa_cfg_get_meminfo() call to | 1071 | * use the bfa_cfg_get_meminfo() call to |
127 | * find the memory blocks required, allocate the | 1072 | * find the memory blocks required, allocate the |
128 | * required memory and provide the starting addresses. | 1073 | * required memory and provide the starting addresses. |
129 | * @param[in] pcidev pointer to struct bfa_pcidev_s | 1074 | * @param[in] pcidev pointer to struct bfa_pcidev_s |
130 | * | 1075 | * |
131 | * @return | 1076 | * @return |
132 | * void | 1077 | * void |
@@ -140,8 +1085,8 @@ void | |||
140 | bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | 1085 | bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, |
141 | struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) | 1086 | struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) |
142 | { | 1087 | { |
143 | int i; | 1088 | int i; |
144 | struct bfa_mem_elem_s *melem; | 1089 | struct bfa_mem_elem_s *melem; |
145 | 1090 | ||
146 | bfa->fcs = BFA_FALSE; | 1091 | bfa->fcs = BFA_FALSE; |
147 | 1092 | ||
@@ -195,20 +1140,6 @@ bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod) | |||
195 | bfa->trcmod = trcmod; | 1140 | bfa->trcmod = trcmod; |
196 | } | 1141 | } |
197 | 1142 | ||
198 | |||
199 | void | ||
200 | bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod) | ||
201 | { | ||
202 | bfa->logm = logmod; | ||
203 | } | ||
204 | |||
205 | |||
206 | void | ||
207 | bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen) | ||
208 | { | ||
209 | bfa->aen = aen; | ||
210 | } | ||
211 | |||
212 | void | 1143 | void |
213 | bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog) | 1144 | bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog) |
214 | { | 1145 | { |
@@ -254,14 +1185,14 @@ bfa_start(struct bfa_s *bfa) | |||
254 | 1185 | ||
255 | /** | 1186 | /** |
256 | * Use this function quiese the IOC. This function will return immediately, | 1187 | * Use this function quiese the IOC. This function will return immediately, |
257 | * when the IOC is actually stopped, the bfa_cb_stop() will be called. | 1188 | * when the IOC is actually stopped, the bfad->comp will be set. |
258 | * | 1189 | * |
259 | * @param[in] bfa - pointer to bfa_t. | 1190 | * @param[in]bfa - pointer to bfa_t. |
260 | * | 1191 | * |
261 | * @return None | 1192 | * @return None |
262 | * | 1193 | * |
263 | * Special Considerations: | 1194 | * Special Considerations: |
264 | * bfa_cb_stop() could be called before or after bfa_stop() returns. | 1195 | * bfad->comp can be set before or after bfa_stop() returns. |
265 | * | 1196 | * |
266 | * @note | 1197 | * @note |
267 | * In case of any failure, we could handle it automatically by doing a | 1198 | * In case of any failure, we could handle it automatically by doing a |
@@ -283,9 +1214,9 @@ bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q) | |||
283 | void | 1214 | void |
284 | bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) | 1215 | bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) |
285 | { | 1216 | { |
286 | struct list_head *qe; | 1217 | struct list_head *qe; |
287 | struct list_head *qen; | 1218 | struct list_head *qen; |
288 | struct bfa_cb_qe_s *hcb_qe; | 1219 | struct bfa_cb_qe_s *hcb_qe; |
289 | 1220 | ||
290 | list_for_each_safe(qe, qen, comp_q) { | 1221 | list_for_each_safe(qe, qen, comp_q) { |
291 | hcb_qe = (struct bfa_cb_qe_s *) qe; | 1222 | hcb_qe = (struct bfa_cb_qe_s *) qe; |
@@ -296,8 +1227,8 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) | |||
296 | void | 1227 | void |
297 | bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) | 1228 | bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) |
298 | { | 1229 | { |
299 | struct list_head *qe; | 1230 | struct list_head *qe; |
300 | struct bfa_cb_qe_s *hcb_qe; | 1231 | struct bfa_cb_qe_s *hcb_qe; |
301 | 1232 | ||
302 | while (!list_empty(comp_q)) { | 1233 | while (!list_empty(comp_q)) { |
303 | bfa_q_deq(comp_q, &qe); | 1234 | bfa_q_deq(comp_q, &qe); |
@@ -321,7 +1252,6 @@ bfa_timer_tick(struct bfa_s *bfa) | |||
321 | bfa_timer_beat(&bfa->timer_mod); | 1252 | bfa_timer_beat(&bfa->timer_mod); |
322 | } | 1253 | } |
323 | 1254 | ||
324 | #ifndef BFA_BIOS_BUILD | ||
325 | /** | 1255 | /** |
326 | * Return the list of PCI vendor/device id lists supported by this | 1256 | * Return the list of PCI vendor/device id lists supported by this |
327 | * BFA instance. | 1257 | * BFA instance. |
@@ -336,7 +1266,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) | |||
336 | {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, | 1266 | {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, |
337 | }; | 1267 | }; |
338 | 1268 | ||
339 | *npciids = ARRAY_SIZE(__pciids); | 1269 | *npciids = sizeof(__pciids) / sizeof(__pciids[0]); |
340 | *pciids = __pciids; | 1270 | *pciids = __pciids; |
341 | } | 1271 | } |
342 | 1272 | ||
@@ -351,7 +1281,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) | |||
351 | * void | 1281 | * void |
352 | * | 1282 | * |
353 | * Special Considerations: | 1283 | * Special Considerations: |
354 | * note | 1284 | * note |
355 | */ | 1285 | */ |
356 | void | 1286 | void |
357 | bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) | 1287 | bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) |
@@ -389,7 +1319,7 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) | |||
389 | cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; | 1319 | cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; |
390 | cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; | 1320 | cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; |
391 | cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; | 1321 | cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; |
392 | cfg->drvcfg.min_cfg = BFA_TRUE; | 1322 | cfg->drvcfg.min_cfg = BFA_TRUE; |
393 | } | 1323 | } |
394 | 1324 | ||
395 | void | 1325 | void |
@@ -417,7 +1347,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa) | |||
417 | } | 1347 | } |
418 | 1348 | ||
419 | /** | 1349 | /** |
420 | * Fetch firmware trace data. | 1350 | * Fetch firmware trace data. |
421 | * | 1351 | * |
422 | * @param[in] bfa BFA instance | 1352 | * @param[in] bfa BFA instance |
423 | * @param[out] trcdata Firmware trace buffer | 1353 | * @param[out] trcdata Firmware trace buffer |
@@ -433,6 +1363,22 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen) | |||
433 | } | 1363 | } |
434 | 1364 | ||
435 | /** | 1365 | /** |
1366 | * Dump firmware memory. | ||
1367 | * | ||
1368 | * @param[in] bfa BFA instance | ||
1369 | * @param[out] buf buffer for dump | ||
1370 | * @param[in,out] offset smem offset to start read | ||
1371 | * @param[in,out] buflen length of buffer | ||
1372 | * | ||
1373 | * @retval BFA_STATUS_OK Firmware memory is dumped. | ||
1374 | * @retval BFA_STATUS_INPROGRESS Firmware memory dump is in progress. | ||
1375 | */ | ||
1376 | bfa_status_t | ||
1377 | bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen) | ||
1378 | { | ||
1379 | return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen); | ||
1380 | } | ||
1381 | /** | ||
436 | * Reset hw semaphore & usage cnt regs and initialize. | 1382 | * Reset hw semaphore & usage cnt regs and initialize. |
437 | */ | 1383 | */ |
438 | void | 1384 | void |
@@ -441,4 +1387,23 @@ bfa_chip_reset(struct bfa_s *bfa) | |||
441 | bfa_ioc_ownership_reset(&bfa->ioc); | 1387 | bfa_ioc_ownership_reset(&bfa->ioc); |
442 | bfa_ioc_pll_init(&bfa->ioc); | 1388 | bfa_ioc_pll_init(&bfa->ioc); |
443 | } | 1389 | } |
444 | #endif | 1390 | |
1391 | /** | ||
1392 | * Fetch firmware statistics data. | ||
1393 | * | ||
1394 | * @param[in] bfa BFA instance | ||
1395 | * @param[out] data Firmware stats buffer | ||
1396 | * | ||
1397 | * @retval BFA_STATUS_OK Firmware trace is fetched. | ||
1398 | */ | ||
1399 | bfa_status_t | ||
1400 | bfa_fw_stats_get(struct bfa_s *bfa, void *data) | ||
1401 | { | ||
1402 | return bfa_ioc_fw_stats_get(&bfa->ioc, data); | ||
1403 | } | ||
1404 | |||
1405 | bfa_status_t | ||
1406 | bfa_fw_stats_clear(struct bfa_s *bfa) | ||
1407 | { | ||
1408 | return bfa_ioc_fw_stats_clear(&bfa->ioc); | ||
1409 | } | ||