diff options
author | Rasesh Mody <rmody@brocade.com> | 2011-09-27 06:39:05 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-09-29 19:36:34 -0400 |
commit | be3a84d1364d2060f4045782a40db39ed21a5c66 (patch) | |
tree | 5470da48877c317c08f39e9b5b148b82129d26a9 /drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | |
parent | 3869f80605847b2235b9be01e59606d9b5606d96 (diff) |
bna: Brocade 1860 IOC PLL, Reg Defs and ASIC Mode Changes
Add logic to set ASIC specific interface in IOC, HW interface initialization
APIs, mode based initialization and MSI-X resource allocation for 1860 with
no asic block. Add new h/w specific register definitions and setup registers
used by IOC logic.
Use normal kernel declaration style, c99 initializers and const for mailbox
structures. Remove unneeded parentheses.
Signed-off-by: Gurunatha Karaje <gkaraje@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c')
-rw-r--r-- | drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | 400 |
1 file changed, 385 insertions, 15 deletions
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c index 7d0d8ffc01bf..bc9e5988cd2f 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | |||
@@ -37,7 +37,9 @@ | |||
37 | static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc); | 37 | static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc); |
38 | static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc); | 38 | static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc); |
39 | static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); | 39 | static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); |
40 | static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc); | ||
40 | static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); | 41 | static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); |
42 | static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc); | ||
41 | static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); | 43 | static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); |
42 | static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); | 44 | static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); |
43 | static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); | 45 | static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); |
@@ -48,6 +50,9 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); | |||
48 | static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); | 50 | static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); |
49 | static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, | 51 | static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, |
50 | enum bfi_asic_mode asic_mode); | 52 | enum bfi_asic_mode asic_mode); |
53 | static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, | ||
54 | enum bfi_asic_mode asic_mode); | ||
55 | static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc); | ||
51 | 56 | ||
52 | static const struct bfa_ioc_hwif nw_hwif_ct = { | 57 | static const struct bfa_ioc_hwif nw_hwif_ct = { |
53 | .ioc_pll_init = bfa_ioc_ct_pll_init, | 58 | .ioc_pll_init = bfa_ioc_ct_pll_init, |
@@ -65,6 +70,23 @@ static const struct bfa_ioc_hwif nw_hwif_ct = { | |||
65 | .ioc_sync_complete = bfa_ioc_ct_sync_complete, | 70 | .ioc_sync_complete = bfa_ioc_ct_sync_complete, |
66 | }; | 71 | }; |
67 | 72 | ||
73 | static const struct bfa_ioc_hwif nw_hwif_ct2 = { | ||
74 | .ioc_pll_init = bfa_ioc_ct2_pll_init, | ||
75 | .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, | ||
76 | .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, | ||
77 | .ioc_reg_init = bfa_ioc_ct2_reg_init, | ||
78 | .ioc_map_port = bfa_ioc_ct2_map_port, | ||
79 | .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat, | ||
80 | .ioc_isr_mode_set = NULL, | ||
81 | .ioc_notify_fail = bfa_ioc_ct_notify_fail, | ||
82 | .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, | ||
83 | .ioc_sync_start = bfa_ioc_ct_sync_start, | ||
84 | .ioc_sync_join = bfa_ioc_ct_sync_join, | ||
85 | .ioc_sync_leave = bfa_ioc_ct_sync_leave, | ||
86 | .ioc_sync_ack = bfa_ioc_ct_sync_ack, | ||
87 | .ioc_sync_complete = bfa_ioc_ct_sync_complete, | ||
88 | }; | ||
89 | |||
68 | /** | 90 | /** |
69 | * Called from bfa_ioc_attach() to map asic specific calls. | 91 | * Called from bfa_ioc_attach() to map asic specific calls. |
70 | */ | 92 | */ |
@@ -74,6 +96,12 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) | |||
74 | ioc->ioc_hwif = &nw_hwif_ct; | 96 | ioc->ioc_hwif = &nw_hwif_ct; |
75 | } | 97 | } |
76 | 98 | ||
99 | void | ||
100 | bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) | ||
101 | { | ||
102 | ioc->ioc_hwif = &nw_hwif_ct2; | ||
103 | } | ||
104 | |||
77 | /** | 105 | /** |
78 | * Return true if firmware of current driver matches the running firmware. | 106 | * Return true if firmware of current driver matches the running firmware. |
79 | */ | 107 | */ |
@@ -170,7 +198,11 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) | |||
170 | /** | 198 | /** |
171 | * Host to LPU mailbox message addresses | 199 | * Host to LPU mailbox message addresses |
172 | */ | 200 | */ |
173 | static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = { | 201 | static const struct { |
202 | u32 hfn_mbox; | ||
203 | u32 lpu_mbox; | ||
204 | u32 hfn_pgn; | ||
205 | } ct_fnreg[] = { | ||
174 | { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, | 206 | { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, |
175 | { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, | 207 | { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, |
176 | { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, | 208 | { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, |
@@ -180,7 +212,10 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = { | |||
180 | /** | 212 | /** |
181 | * Host <-> LPU mailbox command/status registers - port 0 | 213 | * Host <-> LPU mailbox command/status registers - port 0 |
182 | */ | 214 | */ |
183 | static struct { u32 hfn, lpu; } ct_p0reg[] = { | 215 | static const struct { |
216 | u32 hfn; | ||
217 | u32 lpu; | ||
218 | } ct_p0reg[] = { | ||
184 | { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, | 219 | { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, |
185 | { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, | 220 | { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, |
186 | { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, | 221 | { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, |
@@ -190,13 +225,32 @@ static struct { u32 hfn, lpu; } ct_p0reg[] = { | |||
190 | /** | 225 | /** |
191 | * Host <-> LPU mailbox command/status registers - port 1 | 226 | * Host <-> LPU mailbox command/status registers - port 1 |
192 | */ | 227 | */ |
193 | static struct { u32 hfn, lpu; } ct_p1reg[] = { | 228 | static const struct { |
229 | u32 hfn; | ||
230 | u32 lpu; | ||
231 | } ct_p1reg[] = { | ||
194 | { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, | 232 | { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, |
195 | { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, | 233 | { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, |
196 | { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, | 234 | { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, |
197 | { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } | 235 | { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } |
198 | }; | 236 | }; |
199 | 237 | ||
238 | static const struct { | ||
239 | u32 hfn_mbox; | ||
240 | u32 lpu_mbox; | ||
241 | u32 hfn_pgn; | ||
242 | u32 hfn; | ||
243 | u32 lpu; | ||
244 | u32 lpu_read; | ||
245 | } ct2_reg[] = { | ||
246 | { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, | ||
247 | CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT, | ||
248 | CT2_HOSTFN_LPU0_READ_STAT}, | ||
249 | { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, | ||
250 | CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT, | ||
251 | CT2_HOSTFN_LPU1_READ_STAT}, | ||
252 | }; | ||
253 | |||
200 | static void | 254 | static void |
201 | bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) | 255 | bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) |
202 | { | 256 | { |
@@ -218,8 +272,8 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) | |||
218 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; | 272 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; |
219 | ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; | 273 | ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; |
220 | } else { | 274 | } else { |
221 | ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); | 275 | ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG; |
222 | ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); | 276 | ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG; |
223 | ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; | 277 | ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; |
224 | ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; | 278 | ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; |
225 | ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; | 279 | ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; |
@@ -230,24 +284,24 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) | |||
230 | /* | 284 | /* |
231 | * PSS control registers | 285 | * PSS control registers |
232 | */ | 286 | */ |
233 | ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); | 287 | ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; |
234 | ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); | 288 | ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; |
235 | ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG); | 289 | ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG; |
236 | ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG); | 290 | ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG; |
237 | 291 | ||
238 | /* | 292 | /* |
239 | * IOC semaphore registers and serialization | 293 | * IOC semaphore registers and serialization |
240 | */ | 294 | */ |
241 | ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); | 295 | ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG; |
242 | ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); | 296 | ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG; |
243 | ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); | 297 | ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG; |
244 | ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); | 298 | ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT; |
245 | ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC); | 299 | ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC; |
246 | 300 | ||
247 | /** | 301 | /** |
248 | * sram memory access | 302 | * sram memory access |
249 | */ | 303 | */ |
250 | ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); | 304 | ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; |
251 | ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; | 305 | ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; |
252 | 306 | ||
253 | /* | 307 | /* |
@@ -256,6 +310,64 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) | |||
256 | ioc->ioc_regs.err_set = (rb + ERR_SET_REG); | 310 | ioc->ioc_regs.err_set = (rb + ERR_SET_REG); |
257 | } | 311 | } |
258 | 312 | ||
313 | static void | ||
314 | bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) | ||
315 | { | ||
316 | void __iomem *rb; | ||
317 | int port = bfa_ioc_portid(ioc); | ||
318 | |||
319 | rb = bfa_ioc_bar0(ioc); | ||
320 | |||
321 | ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; | ||
322 | ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; | ||
323 | ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; | ||
324 | ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; | ||
325 | ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; | ||
326 | ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; | ||
327 | |||
328 | if (port == 0) { | ||
329 | ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; | ||
330 | ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; | ||
331 | ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; | ||
332 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; | ||
333 | ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; | ||
334 | } else { | ||
335 | ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; | ||
336 | ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; | ||
337 | ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; | ||
338 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; | ||
339 | ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * PSS control registers | ||
344 | */ | ||
345 | ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; | ||
346 | ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; | ||
347 | ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; | ||
348 | ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; | ||
349 | |||
350 | /* | ||
351 | * IOC semaphore registers and serialization | ||
352 | */ | ||
353 | ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; | ||
354 | ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; | ||
355 | ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; | ||
356 | ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; | ||
357 | ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; | ||
358 | |||
359 | /** | ||
360 | * sram memory access | ||
361 | */ | ||
362 | ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; | ||
363 | ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; | ||
364 | |||
365 | /* | ||
366 | * err set reg : for notification of hb failure in fcmode | ||
367 | */ | ||
368 | ioc->ioc_regs.err_set = rb + ERR_SET_REG; | ||
369 | } | ||
370 | |||
259 | /** | 371 | /** |
260 | * Initialize IOC to port mapping. | 372 | * Initialize IOC to port mapping. |
261 | */ | 373 | */ |
@@ -276,6 +388,16 @@ bfa_ioc_ct_map_port(struct bfa_ioc *ioc) | |||
276 | 388 | ||
277 | } | 389 | } |
278 | 390 | ||
391 | static void | ||
392 | bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) | ||
393 | { | ||
394 | void __iomem *rb = ioc->pcidev.pci_bar_kva; | ||
395 | u32 r32; | ||
396 | |||
397 | r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); | ||
398 | ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); | ||
399 | } | ||
400 | |||
279 | /** | 401 | /** |
280 | * Set interrupt mode for a function: INTX or MSIX | 402 | * Set interrupt mode for a function: INTX or MSIX |
281 | */ | 403 | */ |
@@ -307,6 +429,50 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) | |||
307 | writel(r32, rb + FNC_PERS_REG); | 429 | writel(r32, rb + FNC_PERS_REG); |
308 | } | 430 | } |
309 | 431 | ||
432 | static bool | ||
433 | bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) | ||
434 | { | ||
435 | u32 r32; | ||
436 | |||
437 | r32 = readl(ioc->ioc_regs.lpu_read_stat); | ||
438 | if (r32) { | ||
439 | writel(1, ioc->ioc_regs.lpu_read_stat); | ||
440 | return true; | ||
441 | } | ||
442 | |||
443 | return false; | ||
444 | } | ||
445 | |||
446 | /** | ||
447 | * MSI-X resource allocation for 1860 with no asic block | ||
448 | */ | ||
449 | #define HOSTFN_MSIX_DEFAULT 64 | ||
450 | #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 | ||
451 | #define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c | ||
452 | #define __MSIX_VT_NUMVT__MK 0x003ff800 | ||
453 | #define __MSIX_VT_NUMVT__SH 11 | ||
454 | #define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH) | ||
455 | #define __MSIX_VT_OFST_ 0x000007ff | ||
456 | void | ||
457 | bfa_ioc_ct2_poweron(struct bfa_ioc *ioc) | ||
458 | { | ||
459 | void __iomem *rb = ioc->pcidev.pci_bar_kva; | ||
460 | u32 r32; | ||
461 | |||
462 | r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); | ||
463 | if (r32 & __MSIX_VT_NUMVT__MK) { | ||
464 | writel(r32 & __MSIX_VT_OFST_, | ||
465 | rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); | ||
466 | return; | ||
467 | } | ||
468 | |||
469 | writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | | ||
470 | HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), | ||
471 | rb + HOSTFN_MSIX_VT_OFST_NUMVT); | ||
472 | writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), | ||
473 | rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); | ||
474 | } | ||
475 | |||
310 | /** | 476 | /** |
311 | * Cleanup hw semaphore and usecnt registers | 477 | * Cleanup hw semaphore and usecnt registers |
312 | */ | 478 | */ |
@@ -499,3 +665,207 @@ bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) | |||
499 | writel(0, (rb + MBIST_CTL_REG)); | 665 | writel(0, (rb + MBIST_CTL_REG)); |
500 | return BFA_STATUS_OK; | 666 | return BFA_STATUS_OK; |
501 | } | 667 | } |
668 | |||
669 | static void | ||
670 | bfa_ioc_ct2_sclk_init(void __iomem *rb) | ||
671 | { | ||
672 | u32 r32; | ||
673 | |||
674 | /* | ||
675 | * put s_clk PLL and PLL FSM in reset | ||
676 | */ | ||
677 | r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
678 | r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN); | ||
679 | r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS | | ||
680 | __APP_PLL_SCLK_LOGIC_SOFT_RESET); | ||
681 | writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
682 | |||
683 | /* | ||
684 | * Ignore mode and program for the max clock (which is FC16) | ||
685 | * Firmware/NFC will do the PLL init appropiately | ||
686 | */ | ||
687 | r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
688 | r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); | ||
689 | writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
690 | |||
691 | /* | ||
692 | * while doing PLL init dont clock gate ethernet subsystem | ||
693 | */ | ||
694 | r32 = readl((rb + CT2_CHIP_MISC_PRG)); | ||
695 | writel((r32 | __ETH_CLK_ENABLE_PORT0), | ||
696 | (rb + CT2_CHIP_MISC_PRG)); | ||
697 | |||
698 | r32 = readl((rb + CT2_PCIE_MISC_REG)); | ||
699 | writel((r32 | __ETH_CLK_ENABLE_PORT1), | ||
700 | (rb + CT2_PCIE_MISC_REG)); | ||
701 | |||
702 | /* | ||
703 | * set sclk value | ||
704 | */ | ||
705 | r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
706 | r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL | | ||
707 | __APP_PLL_SCLK_CLK_DIV2); | ||
708 | writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
709 | |||
710 | /* | ||
711 | * poll for s_clk lock or delay 1ms | ||
712 | */ | ||
713 | udelay(1000); | ||
714 | |||
715 | /* | ||
716 | * Dont do clock gating for ethernet subsystem, firmware/NFC will | ||
717 | * do this appropriately | ||
718 | */ | ||
719 | } | ||
720 | |||
721 | static void | ||
722 | bfa_ioc_ct2_lclk_init(void __iomem *rb) | ||
723 | { | ||
724 | u32 r32; | ||
725 | |||
726 | /* | ||
727 | * put l_clk PLL and PLL FSM in reset | ||
728 | */ | ||
729 | r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
730 | r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN); | ||
731 | r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS | | ||
732 | __APP_PLL_LCLK_LOGIC_SOFT_RESET); | ||
733 | writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
734 | |||
735 | /* | ||
736 | * set LPU speed (set for FC16 which will work for other modes) | ||
737 | */ | ||
738 | r32 = readl((rb + CT2_CHIP_MISC_PRG)); | ||
739 | writel(r32, (rb + CT2_CHIP_MISC_PRG)); | ||
740 | |||
741 | /* | ||
742 | * set LPU half speed (set for FC16 which will work for other modes) | ||
743 | */ | ||
744 | r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
745 | writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
746 | |||
747 | /* | ||
748 | * set lclk for mode (set for FC16) | ||
749 | */ | ||
750 | r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
751 | r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED); | ||
752 | r32 |= 0x20c1731b; | ||
753 | writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
754 | |||
755 | /* | ||
756 | * poll for s_clk lock or delay 1ms | ||
757 | */ | ||
758 | udelay(1000); | ||
759 | } | ||
760 | |||
761 | static void | ||
762 | bfa_ioc_ct2_mem_init(void __iomem *rb) | ||
763 | { | ||
764 | u32 r32; | ||
765 | |||
766 | r32 = readl((rb + PSS_CTL_REG)); | ||
767 | r32 &= ~__PSS_LMEM_RESET; | ||
768 | writel(r32, (rb + PSS_CTL_REG)); | ||
769 | udelay(1000); | ||
770 | |||
771 | writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG)); | ||
772 | udelay(1000); | ||
773 | writel(0, (rb + CT2_MBIST_CTL_REG)); | ||
774 | } | ||
775 | |||
776 | static void | ||
777 | bfa_ioc_ct2_mac_reset(void __iomem *rb) | ||
778 | { | ||
779 | volatile u32 r32; | ||
780 | |||
781 | bfa_ioc_ct2_sclk_init(rb); | ||
782 | bfa_ioc_ct2_lclk_init(rb); | ||
783 | |||
784 | /* | ||
785 | * release soft reset on s_clk & l_clk | ||
786 | */ | ||
787 | r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
788 | writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET), | ||
789 | (rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
790 | |||
791 | /* | ||
792 | * release soft reset on s_clk & l_clk | ||
793 | */ | ||
794 | r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
795 | writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET), | ||
796 | (rb + CT2_APP_PLL_LCLK_CTL_REG)); | ||
797 | |||
798 | /* put port0, port1 MAC & AHB in reset */ | ||
799 | writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), | ||
800 | (rb + CT2_CSI_MAC_CONTROL_REG(0))); | ||
801 | writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), | ||
802 | (rb + CT2_CSI_MAC_CONTROL_REG(1))); | ||
803 | } | ||
804 | |||
805 | #define CT2_NFC_MAX_DELAY 1000 | ||
806 | static enum bfa_status | ||
807 | bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) | ||
808 | { | ||
809 | volatile u32 wgn, r32; | ||
810 | int i; | ||
811 | |||
812 | /* | ||
813 | * Initialize PLL if not already done by NFC | ||
814 | */ | ||
815 | wgn = readl(rb + CT2_WGN_STATUS); | ||
816 | if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { | ||
817 | writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); | ||
818 | for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { | ||
819 | r32 = readl(rb + CT2_NFC_CSR_SET_REG); | ||
820 | if (r32 & __NFC_CONTROLLER_HALTED) | ||
821 | break; | ||
822 | udelay(1000); | ||
823 | } | ||
824 | } | ||
825 | |||
826 | /* | ||
827 | * Mask the interrupts and clear any | ||
828 | * pending interrupts left by BIOS/EFI | ||
829 | */ | ||
830 | |||
831 | writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); | ||
832 | writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); | ||
833 | |||
834 | r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); | ||
835 | if (r32 == 1) { | ||
836 | writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); | ||
837 | readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); | ||
838 | } | ||
839 | r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); | ||
840 | if (r32 == 1) { | ||
841 | writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); | ||
842 | readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); | ||
843 | } | ||
844 | |||
845 | bfa_ioc_ct2_mac_reset(rb); | ||
846 | bfa_ioc_ct2_sclk_init(rb); | ||
847 | bfa_ioc_ct2_lclk_init(rb); | ||
848 | |||
849 | /* | ||
850 | * release soft reset on s_clk & l_clk | ||
851 | */ | ||
852 | r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
853 | writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET), | ||
854 | (rb + CT2_APP_PLL_SCLK_CTL_REG)); | ||
855 | |||
856 | /* | ||
857 | * Announce flash device presence, if flash was corrupted. | ||
858 | */ | ||
859 | if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { | ||
860 | r32 = readl((rb + PSS_GPIO_OUT_REG)); | ||
861 | writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG)); | ||
862 | r32 = readl((rb + PSS_GPIO_OE_REG)); | ||
863 | writel((r32 | 1), (rb + PSS_GPIO_OE_REG)); | ||
864 | } | ||
865 | |||
866 | bfa_ioc_ct2_mem_init(rb); | ||
867 | |||
868 | writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); | ||
869 | writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); | ||
870 | return BFA_STATUS_OK; | ||
871 | } | ||