author	Rasesh Mody <rmody@brocade.com>	2010-08-26 02:00:27 -0400
committer	David S. Miller <davem@davemloft.net>	2010-08-26 02:00:27 -0400
commit	8a891429d1879ae4f37f547ef5c2d68e19277e4a (patch)
tree	ac759c522cbc86c4373d6086ccd800e9aea09c7f /drivers/net
parent	ced1de4c9eeded664e5f1b21cfcb0fb70cc0cde3 (diff)
bna: Fixed build break for allyesconfig
This is the patch to fix the build break caused by multiple definitions of
symbols between Brocade's FC/FCoE driver (BFA) and 10G Networking Driver
(BNA). Changes are:

1. locally used functions are made static
2. unused functions are removed
3. using unique namespaces for the function names that must be globally
   visible

Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
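The pattern behind the fix can be illustrated with a minimal, hypothetical C sketch (the symbol names below are illustrative only, not taken from the driver): when the FC copy and the networking copy of the shared IOC/CEE code are both built into one kernel image, every non-static symbol they both define collides at link time, so file-local helpers are given internal linkage and the symbols the network driver must still export are moved into a `bfa_nw_` namespace.

/* Hypothetical sketch of the symbol-collision fix; names are illustrative,
 * not the driver's real code.
 *
 * Before: both the FC (bfa/) and networking (bna/) copies define this with
 * external linkage, so an allyesconfig (everything built-in) link fails with
 * "multiple definition of `bfa_example_setup'".
 */
void bfa_example_setup(void);

/* After, in the networking (bna/) copy: */

/* 1. A helper used only inside this file loses external linkage. */
static void bfa_example_helper(void)
{
	/* ...hardware setup details... */
}

/* 2. A symbol other bna/ files still need gets a driver-unique prefix. */
void bfa_nw_example_setup(void)
{
	bfa_example_helper();
}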
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/bna/bfa_cee.c	| 136
-rw-r--r--	drivers/net/bna/bfa_cee.h	|  14
-rw-r--r--	drivers/net/bna/bfa_ioc.c	| 249
-rw-r--r--	drivers/net/bna/bfa_ioc.h	|  96
-rw-r--r--	drivers/net/bna/bfa_ioc_ct.c	|  49
-rw-r--r--	drivers/net/bna/bna_ctrl.c	|  38
-rw-r--r--	drivers/net/bna/bnad.c	|   9
-rw-r--r--	drivers/net/bna/bnad_ethtool.c	|   2
8 files changed, 162 insertions, 431 deletions
diff --git a/drivers/net/bna/bfa_cee.c b/drivers/net/bna/bfa_cee.c
index 1545fc9720f8..f7b789a3b217 100644
--- a/drivers/net/bna/bfa_cee.c
+++ b/drivers/net/bna/bfa_cee.c
@@ -152,7 +152,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
 	cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
 }
 /**
- * bfa_cee_meminfo()
+ * bfa_nw_cee_meminfo()
  *
  * @brief Returns the size of the DMA memory needed by CEE module
  *
@@ -161,13 +161,13 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
  * @return Size of DMA region
  */
 u32
-bfa_cee_meminfo(void)
+bfa_nw_cee_meminfo(void)
 {
 	return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
 }
 
 /**
- * bfa_cee_mem_claim()
+ * bfa_nw_cee_mem_claim()
  *
  * @brief Initialized CEE DMA Memory
  *
@@ -178,7 +178,7 @@ bfa_cee_meminfo(void)
  * @return void
  */
 void
-bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
+bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
 {
 	cee->attr_dma.kva = dma_kva;
 	cee->attr_dma.pa = dma_pa;
@@ -190,108 +190,6 @@ bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
 }
 
 /**
- * bfa_cee_get_attr()
- *
- * @brief
- * Send the request to the f/w to fetch CEE attributes.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return Status
- */
-
-enum bfa_status
-bfa_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
-	bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
-{
-	struct bfi_cee_get_req *cmd;
-
-	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
-	if (!bfa_ioc_is_operational(cee->ioc))
-		return BFA_STATUS_IOC_FAILURE;
-	if (cee->get_attr_pending == true)
-		return BFA_STATUS_DEVBUSY;
-	cee->get_attr_pending = true;
-	cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
-	cee->attr = attr;
-	cee->cbfn.get_attr_cbfn = cbfn;
-	cee->cbfn.get_attr_cbarg = cbarg;
-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
-		bfa_ioc_portid(cee->ioc));
-	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
-	bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
-
-	return BFA_STATUS_OK;
-}
-
-/**
- * bfa_cee_get_stats()
- *
- * @brief
- * Send the request to the f/w to fetch CEE statistics.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return Status
- */
-
-enum bfa_status
-bfa_cee_get_stats(struct bfa_cee *cee, struct bfa_cee_stats *stats,
-	bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
-{
-	struct bfi_cee_get_req *cmd;
-
-	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
-
-	if (!bfa_ioc_is_operational(cee->ioc))
-		return BFA_STATUS_IOC_FAILURE;
-	if (cee->get_stats_pending == true)
-		return BFA_STATUS_DEVBUSY;
-	cee->get_stats_pending = true;
-	cmd = (struct bfi_cee_get_req *) cee->get_stats_mb.msg;
-	cee->stats = stats;
-	cee->cbfn.get_stats_cbfn = cbfn;
-	cee->cbfn.get_stats_cbarg = cbarg;
-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
-		bfa_ioc_portid(cee->ioc));
-	bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
-	bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
-
-	return BFA_STATUS_OK;
-}
-
-/**
- * bfa_cee_reset_stats()
- *
- * @brief Clears CEE Stats in the f/w.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return Status
- */
-
-enum bfa_status
-bfa_cee_reset_stats(struct bfa_cee *cee, bfa_cee_reset_stats_cbfn_t cbfn,
-	void *cbarg)
-{
-	struct bfi_cee_reset_stats *cmd;
-
-	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
-	if (!bfa_ioc_is_operational(cee->ioc))
-		return BFA_STATUS_IOC_FAILURE;
-	if (cee->reset_stats_pending == true)
-		return BFA_STATUS_DEVBUSY;
-	cee->reset_stats_pending = true;
-	cmd = (struct bfi_cee_reset_stats *) cee->reset_stats_mb.msg;
-	cee->cbfn.reset_stats_cbfn = cbfn;
-	cee->cbfn.reset_stats_cbarg = cbarg;
-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
-		bfa_ioc_portid(cee->ioc));
-	bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
-	return BFA_STATUS_OK;
-}
-
-/**
  * bfa_cee_isrs()
  *
  * @brief Handles Mail-box interrupts for CEE module.
@@ -301,7 +199,7 @@ bfa_cee_reset_stats(struct bfa_cee *cee, bfa_cee_reset_stats_cbfn_t cbfn,
  * @return void
  */
 
-void
+static void
 bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
 {
 	union bfi_cee_i2h_msg_u *msg;
@@ -334,7 +232,7 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
  * @return void
  */
 
-void
+static void
 bfa_cee_hbfail(void *arg)
 {
 	struct bfa_cee *cee;
@@ -367,7 +265,7 @@ bfa_cee_hbfail(void *arg)
 }
 
 /**
- * bfa_cee_attach()
+ * bfa_nw_cee_attach()
  *
  * @brief CEE module-attach API
  *
@@ -380,28 +278,14 @@ bfa_cee_hbfail(void *arg)
  * @return void
  */
 void
-bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
+bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
 	void *dev)
 {
 	BUG_ON(!(cee != NULL));
 	cee->dev = dev;
 	cee->ioc = ioc;
 
-	bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+	bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
 	bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
-	bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
-}
-
-/**
- * bfa_cee_detach()
- *
- * @brief CEE module-detach API
- *
- * @param[in] cee - Pointer to the CEE module data structure
- *
- * @return void
- */
-void
-bfa_cee_detach(struct bfa_cee *cee)
-{
+	bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
 }
diff --git a/drivers/net/bna/bfa_cee.h b/drivers/net/bna/bfa_cee.h
index 1208cadeceed..20543d15b64f 100644
--- a/drivers/net/bna/bfa_cee.h
+++ b/drivers/net/bna/bfa_cee.h
@@ -56,17 +56,9 @@ struct bfa_cee {
 	struct bfa_mbox_cmd reset_stats_mb;
 };
 
-u32 bfa_cee_meminfo(void);
-void bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
+u32 bfa_nw_cee_meminfo(void);
+void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
 	u64 dma_pa);
-void bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
-void bfa_cee_detach(struct bfa_cee *cee);
-enum bfa_status bfa_cee_get_attr(struct bfa_cee *cee,
-	struct bfa_cee_attr *attr, bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
-enum bfa_status bfa_cee_get_stats(struct bfa_cee *cee,
-	struct bfa_cee_stats *stats, bfa_cee_get_stats_cbfn_t cbfn,
-	void *cbarg);
-enum bfa_status bfa_cee_reset_stats(struct bfa_cee *cee,
-	bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
+void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
 
 #endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index cdc2cb1597ec..caa45c2185e9 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -65,7 +65,7 @@
 	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
 	readl((__ioc)->ioc_regs.hfn_mbox_cmd))
 
-bool bfa_auto_recover = true;
+bool bfa_nw_auto_recover = true;
 
 /*
  * forward declarations
@@ -85,6 +85,23 @@ static void bfa_ioc_recover(struct bfa_ioc *ioc);
 static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+	u32 boot_param);
+static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
+static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
+static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
+	char *serial_num);
+static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
+	char *fw_ver);
+static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
+	char *chip_rev);
+static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
+	char *optrom_ver);
+static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
+	char *manufacturer);
+static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
+static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
+static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
 
 /**
  * IOC state machine events
@@ -138,7 +155,7 @@ static void
 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
 {
 	ioc->retry_count = 0;
-	ioc->auto_recover = bfa_auto_recover;
+	ioc->auto_recover = bfa_nw_auto_recover;
 }
 
 /**
@@ -185,7 +202,7 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
 			ioc->retry_count = 0;
 			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
 		} else {
-			bfa_ioc_hw_sem_release(ioc);
+			bfa_nw_ioc_hw_sem_release(ioc);
 			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
 		}
 		break;
@@ -314,12 +331,12 @@ bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
 			break;
 		}
 
-		bfa_ioc_hw_sem_release(ioc);
+		bfa_nw_ioc_hw_sem_release(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
 		break;
 
 	case IOC_E_DISABLE:
-		bfa_ioc_hw_sem_release(ioc);
+		bfa_nw_ioc_hw_sem_release(ioc);
 		bfa_ioc_timer_stop(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
 		break;
@@ -346,7 +363,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
 	switch (event) {
 	case IOC_E_FWRSP_ENABLE:
 		bfa_ioc_timer_stop(ioc);
-		bfa_ioc_hw_sem_release(ioc);
+		bfa_nw_ioc_hw_sem_release(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
 		break;
 
@@ -363,13 +380,13 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
 			break;
 		}
 
-		bfa_ioc_hw_sem_release(ioc);
+		bfa_nw_ioc_hw_sem_release(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
 		break;
 
 	case IOC_E_DISABLE:
 		bfa_ioc_timer_stop(ioc);
-		bfa_ioc_hw_sem_release(ioc);
+		bfa_nw_ioc_hw_sem_release(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
 		break;
 
@@ -662,7 +679,7 @@ bfa_ioc_disable_comp(struct bfa_ioc *ioc)
 }
 
 void
-bfa_ioc_sem_timeout(void *ioc_arg)
+bfa_nw_ioc_sem_timeout(void *ioc_arg)
 {
 	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
 
@@ -670,7 +687,7 @@ bfa_ioc_sem_timeout(void *ioc_arg)
 }
 
 bool
-bfa_ioc_sem_get(void __iomem *sem_reg)
+bfa_nw_ioc_sem_get(void __iomem *sem_reg)
 {
 	u32 r32;
 	int cnt = 0;
@@ -692,7 +709,7 @@ bfa_ioc_sem_get(void __iomem *sem_reg)
 }
 
 void
-bfa_ioc_sem_release(void __iomem *sem_reg)
+bfa_nw_ioc_sem_release(void __iomem *sem_reg)
 {
 	writel(1, sem_reg);
 }
@@ -717,7 +734,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
 }
 
 void
-bfa_ioc_hw_sem_release(struct bfa_ioc *ioc)
+bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
 {
 	writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
@@ -800,7 +817,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
  * Get driver and firmware versions.
  */
 void
-bfa_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
 {
 	u32 pgnum, pgoff;
 	u32 loff = 0;
@@ -823,7 +840,7 @@ bfa_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  * Returns TRUE if same.
  */
 bool
-bfa_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
 {
 	struct bfi_ioc_image_hdr *drv_fwhdr;
 	int i;
@@ -854,7 +871,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
 	if (bfa_ioc_is_optrom(ioc))
 		return true;
 
-	bfa_ioc_fwver_get(ioc, &fwhdr);
+	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
 	drv_fwhdr = (struct bfi_ioc_image_hdr *)
 		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
 
@@ -864,7 +881,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
 	if (fwhdr.exec != drv_fwhdr->exec)
 		return false;
 
-	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
+	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
 }
 
 /**
@@ -941,14 +958,14 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 }
 
 void
-bfa_ioc_timeout(void *ioc_arg)
+bfa_nw_ioc_timeout(void *ioc_arg)
 {
 	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
 
 	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
 }
 
-void
+static void
 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
 {
 	u32 *msgp = (u32 *) ioc_msg;
@@ -1009,7 +1026,7 @@ bfa_ioc_send_getattr(struct bfa_ioc *ioc)
 }
 
 void
-bfa_ioc_hb_check(void *cbarg)
+bfa_nw_ioc_hb_check(void *cbarg)
 {
 	struct bfa_ioc *ioc = cbarg;
 	u32 hb_count;
@@ -1195,13 +1212,13 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
 /**
  * IOC public
  */
-enum bfa_status
+static enum bfa_status
 bfa_ioc_pll_init(struct bfa_ioc *ioc)
 {
 	/*
 	 * Hold semaphore so that nobody can access the chip during init.
 	 */
-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
 
 	bfa_ioc_pll_init_asic(ioc);
 
@@ -1209,7 +1226,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
 	/*
 	 * release semaphore.
 	 */
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 
 	return BFA_STATUS_OK;
 }
@@ -1218,7 +1235,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
  * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
-void
+static void
 bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
 {
 	void __iomem *rb;
@@ -1254,28 +1271,18 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
 * Enable/disable IOC failure auto recovery.
 */
 void
-bfa_ioc_auto_recover(bool auto_recover)
+bfa_nw_ioc_auto_recover(bool auto_recover)
 {
-	bfa_auto_recover = auto_recover;
+	bfa_nw_auto_recover = auto_recover;
 }
 
 bool
-bfa_ioc_is_operational(struct bfa_ioc *ioc)
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
 {
 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
 }
 
-bool
-bfa_ioc_is_initialized(struct bfa_ioc *ioc)
-{
-	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
-
-	return ((r32 != BFI_IOC_UNINIT) &&
-		(r32 != BFI_IOC_INITING) &&
-		(r32 != BFI_IOC_MEMTEST));
-}
-
-void
+static void
 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
 {
 	u32 *msgp = mbmsg;
@@ -1299,7 +1306,7 @@ bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
 	readl(ioc->ioc_regs.lpu_mbox_cmd);
 }
 
-void
+static void
 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
 {
 	union bfi_ioc_i2h_msg_u *msg;
@@ -1340,7 +1347,7 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
  * @param[in] bfa driver instance structure
 */
 void
-bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
+bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
 {
 	ioc->bfa = bfa;
 	ioc->cbfn = cbfn;
@@ -1358,7 +1365,7 @@ bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
  * Driver detach time IOC cleanup.
 */
 void
-bfa_ioc_detach(struct bfa_ioc *ioc)
+bfa_nw_ioc_detach(struct bfa_ioc *ioc)
 {
 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
 }
@@ -1369,7 +1376,7 @@ bfa_ioc_detach(struct bfa_ioc *ioc)
  * @param[in] pcidev PCI device information for this IOC
 */
 void
-bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
 	enum bfi_mclass mc)
 {
 	ioc->ioc_mc = mc;
@@ -1377,7 +1384,7 @@ bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
 	ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
 	ioc->cna = ioc->ctdev && !ioc->fcmode;
 
-	bfa_ioc_set_ct_hwif(ioc);
+	bfa_nw_ioc_set_ct_hwif(ioc);
 
 	bfa_ioc_map_port(ioc);
 	bfa_ioc_reg_init(ioc);
@@ -1390,7 +1397,7 @@ bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
  * @param[in] dm_pa physical address of IOC dma memory
 */
 void
-bfa_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
+bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
 {
 	/**
 	 * dma memory for firmware attribute
@@ -1404,13 +1411,13 @@ bfa_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
  * Return size of dma memory required.
 */
 u32
-bfa_ioc_meminfo(void)
+bfa_nw_ioc_meminfo(void)
 {
 	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
 }
 
 void
-bfa_ioc_enable(struct bfa_ioc *ioc)
+bfa_nw_ioc_enable(struct bfa_ioc *ioc)
 {
 	bfa_ioc_stats(ioc, ioc_enables);
 	ioc->dbg_fwsave_once = true;
@@ -1419,45 +1426,29 @@ bfa_ioc_enable(struct bfa_ioc *ioc)
 }
 
 void
-bfa_ioc_disable(struct bfa_ioc *ioc)
+bfa_nw_ioc_disable(struct bfa_ioc *ioc)
 {
 	bfa_ioc_stats(ioc, ioc_disables);
 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
-u32
+static u32
 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
 {
 	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
 }
 
-u32
+static u32
 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
 {
 	return PSS_SMEM_PGOFF(fmaddr);
 }
 
 /**
- * Register mailbox message handler functions
- *
- * @param[in] ioc IOC instance
- * @param[in] mcfuncs message class handler functions
- */
-void
-bfa_ioc_mbox_register(struct bfa_ioc *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
-{
-	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
-	int mc;
-
-	for (mc = 0; mc < BFI_MC_MAX; mc++)
-		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
-}
-
-/**
  * Register mailbox message handler function, to be called by common modules
 */
 void
-bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
 	bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
 {
 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
@@ -1474,7 +1465,7 @@ bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
  * @param[i] cmd Mailbox command
 */
 void
-bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
+bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
 {
 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
 	u32 stat;
@@ -1506,7 +1497,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
  * Handle mailbox interrupts
 */
 void
-bfa_ioc_mbox_isr(struct bfa_ioc *ioc)
+bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
 {
 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
 	struct bfi_mbmsg m;
@@ -1530,86 +1521,24 @@ bfa_ioc_mbox_isr(struct bfa_ioc *ioc)
 }
 
 void
-bfa_ioc_error_isr(struct bfa_ioc *ioc)
+bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
 {
 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
 }
 
-void
-bfa_ioc_set_fcmode(struct bfa_ioc *ioc)
-{
-	ioc->fcmode = true;
-	ioc->port_id = bfa_ioc_pcifn(ioc);
-}
-
-/**
- * return true if IOC is disabled
- */
-bool
-bfa_ioc_is_disabled(struct bfa_ioc *ioc)
-{
-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
-		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
-}
-
-/**
- * return true if IOC firmware is different.
- */
-bool
-bfa_ioc_fw_mismatch(struct bfa_ioc *ioc)
-{
-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
-		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) ||
-		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
-}
-
-#define bfa_ioc_state_disabled(__sm) \
-	(((__sm) == BFI_IOC_UNINIT) || \
-	((__sm) == BFI_IOC_INITING) || \
-	((__sm) == BFI_IOC_HWINIT) || \
-	((__sm) == BFI_IOC_DISABLED) || \
-	((__sm) == BFI_IOC_FAIL) || \
-	((__sm) == BFI_IOC_CFG_DISABLED))
-
-/**
- * Check if adapter is disabled -- both IOCs should be in a disabled
- * state.
- */
-bool
-bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc)
-{
-	u32 ioc_state;
-	void __iomem *rb = ioc->pcidev.pci_bar_kva;
-
-	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
-		return false;
-
-	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
-	if (!bfa_ioc_state_disabled(ioc_state))
-		return false;
-
-	if (ioc->pcidev.device_id != PCI_DEVICE_ID_BROCADE_FC_8G1P) {
-		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
-		if (!bfa_ioc_state_disabled(ioc_state))
-			return false;
-	}
-
-	return true;
-}
-
 /**
  * Add to IOC heartbeat failure notification queue. To be used by common
  * modules such as cee, port, diag.
 */
 void
-bfa_ioc_hbfail_register(struct bfa_ioc *ioc,
+bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
 	struct bfa_ioc_hbfail_notify *notify)
 {
 	list_add_tail(&notify->qe, &ioc->hb_notify_q);
 }
 
 #define BFA_MFG_NAME "Brocade"
-void
+static void
 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
 	struct bfa_adapter_attr *ad_attr)
 {
@@ -1640,7 +1569,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
 	ad_attr->prototype = 0;
 
 	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
-	ad_attr->mac = bfa_ioc_get_mac(ioc);
+	ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
 
 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
@@ -1653,7 +1582,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
 	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
 }
 
-enum bfa_ioc_type
+static enum bfa_ioc_type
 bfa_ioc_get_type(struct bfa_ioc *ioc)
 {
 	if (!ioc->ctdev || ioc->fcmode)
@@ -1668,7 +1597,7 @@ bfa_ioc_get_type(struct bfa_ioc *ioc)
 	}
 }
 
-void
+static void
 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
 {
 	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
@@ -1677,14 +1606,14 @@ bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
 		BFA_ADAPTER_SERIAL_NUM_LEN);
 }
 
-void
+static void
 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
 {
 	memset(fw_ver, 0, BFA_VERSION_LEN);
 	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
 }
 
-void
+static void
 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
 {
 	BUG_ON(!(chip_rev));
@@ -1699,7 +1628,7 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
 	chip_rev[5] = '\0';
 }
 
-void
+static void
 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
 {
 	memset(optrom_ver, 0, BFA_VERSION_LEN);
@@ -1707,14 +1636,14 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
 		BFA_VERSION_LEN);
 }
 
-void
+static void
 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
 {
 	memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
 	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
 }
 
-void
+static void
 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
 {
 	struct bfi_ioc_attr *ioc_attr;
@@ -1731,14 +1660,14 @@ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
 		BFA_MFG_NAME, ioc_attr->card_type);
 }
 
-enum bfa_ioc_state
+static enum bfa_ioc_state
 bfa_ioc_get_state(struct bfa_ioc *ioc)
 {
 	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
 }
 
 void
-bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
+bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
 {
 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
 
@@ -1757,26 +1686,14 @@ bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
 /**
  * WWN public
 */
-u64
+static u64
 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
 {
 	return ioc->attr->pwwn;
 }
 
-u64
-bfa_ioc_get_nwwn(struct bfa_ioc *ioc)
-{
-	return ioc->attr->nwwn;
-}
-
-u64
-bfa_ioc_get_adid(struct bfa_ioc *ioc)
-{
-	return ioc->attr->mfg_pwwn;
-}
-
 mac_t
-bfa_ioc_get_mac(struct bfa_ioc *ioc)
+bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
 {
 	/*
 	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
@@ -1787,19 +1704,7 @@ bfa_ioc_get_mac(struct bfa_ioc *ioc)
 	return ioc->attr->mac;
 }
 
-u64
-bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc)
-{
-	return ioc->attr->mfg_pwwn;
-}
-
-u64
-bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc)
-{
-	return ioc->attr->mfg_nwwn;
-}
-
-mac_t
+static mac_t
 bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
 {
 	mac_t m;
@@ -1814,12 +1719,6 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
 	return m;
 }
 
-bool
-bfa_ioc_get_fcmode(struct bfa_ioc *ioc)
-{
-	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
-}
-
 /**
  * Firmware failure detected. Start recovery actions.
 */
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index 2e5c0adef899..7f0719e17efc 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -239,13 +239,9 @@ struct bfa_ioc_hwif {
 /**
 * IOC mailbox interface
 */
-void bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
-void bfa_ioc_mbox_register(struct bfa_ioc *ioc,
-	bfa_ioc_mbox_mcfunc_t *mcfuncs);
-void bfa_ioc_mbox_isr(struct bfa_ioc *ioc);
-void bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len);
-void bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg);
-void bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
+void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
+void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
 	bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
 
 /**
@@ -256,83 +252,45 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
 	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
 	(__ioc)->fcmode))
 
-enum bfa_status bfa_ioc_pll_init(struct bfa_ioc *ioc);
-enum bfa_status bfa_ioc_cb_pll_init(void __iomem *rb, bool fcmode);
-enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
-
 #define bfa_ioc_isr_mode_set(__ioc, __msix) \
 	((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
 #define bfa_ioc_ownership_reset(__ioc) \
 	((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
 
-void bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc);
+void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
 
-void bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa,
+void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
 	struct bfa_ioc_cbfn *cbfn);
-void bfa_ioc_auto_recover(bool auto_recover);
-void bfa_ioc_detach(struct bfa_ioc *ioc);
-void bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+void bfa_nw_ioc_auto_recover(bool auto_recover);
+void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
+void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
 	enum bfi_mclass mc);
-u32 bfa_ioc_meminfo(void);
-void bfa_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
-void bfa_ioc_enable(struct bfa_ioc *ioc);
-void bfa_ioc_disable(struct bfa_ioc *ioc);
-bool bfa_ioc_intx_claim(struct bfa_ioc *ioc);
-
-void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
-	u32 boot_param);
-void bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *msg);
-void bfa_ioc_error_isr(struct bfa_ioc *ioc);
-bool bfa_ioc_is_operational(struct bfa_ioc *ioc);
-bool bfa_ioc_is_initialized(struct bfa_ioc *ioc);
-bool bfa_ioc_is_disabled(struct bfa_ioc *ioc);
-bool bfa_ioc_fw_mismatch(struct bfa_ioc *ioc);
-bool bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc);
-void bfa_ioc_cfg_complete(struct bfa_ioc *ioc);
-enum bfa_ioc_type bfa_ioc_get_type(struct bfa_ioc *ioc);
-void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num);
-void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver);
-void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver);
-void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
-void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
-	char *manufacturer);
-void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev);
-enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc *ioc);
-
-void bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
-void bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
-	struct bfa_adapter_attr *ad_attr);
-u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
-u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
-void bfa_ioc_set_fcmode(struct bfa_ioc *ioc);
-bool bfa_ioc_get_fcmode(struct bfa_ioc *ioc);
-void bfa_ioc_hbfail_register(struct bfa_ioc *ioc,
+u32 bfa_nw_ioc_meminfo(void);
+void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
+void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
+void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
+
+void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
 	struct bfa_ioc_hbfail_notify *notify);
-bool bfa_ioc_sem_get(void __iomem *sem_reg);
-void bfa_ioc_sem_release(void __iomem *sem_reg);
-void bfa_ioc_hw_sem_release(struct bfa_ioc *ioc);
-void bfa_ioc_fwver_get(struct bfa_ioc *ioc,
+bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
+void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
+void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
+void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
 	struct bfi_ioc_image_hdr *fwhdr);
-bool bfa_ioc_fwver_cmp(struct bfa_ioc *ioc,
+bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
 	struct bfi_ioc_image_hdr *fwhdr);
+mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
 
 /*
 * Timeout APIs
 */
-void bfa_ioc_timeout(void *ioc);
-void bfa_ioc_hb_check(void *ioc);
-void bfa_ioc_sem_timeout(void *ioc);
-
-/*
- * bfa mfg wwn API functions
- */
-u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
-u64 bfa_ioc_get_nwwn(struct bfa_ioc *ioc);
-mac_t bfa_ioc_get_mac(struct bfa_ioc *ioc);
-u64 bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc);
-u64 bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc);
-mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
-u64 bfa_ioc_get_adid(struct bfa_ioc *ioc);
+void bfa_nw_ioc_timeout(void *ioc);
+void bfa_nw_ioc_hb_check(void *ioc);
+void bfa_nw_ioc_sem_timeout(void *ioc);
 
 /*
 * F/W Image Size & Chunk
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 870046e32c8d..462857cbab9b 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -32,25 +32,26 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
 static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
 
-struct bfa_ioc_hwif hwif_ct;
+struct bfa_ioc_hwif nw_hwif_ct;
 
 /**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
 void
-bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc)
+bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
 {
-	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
-	hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
-	hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
-	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
-	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
-	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
-	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
-	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+	nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+	nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+	nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+	nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+	nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+	nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+	nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
 
-	ioc->ioc_hwif = &hwif_ct;
+	ioc->ioc_hwif = &nw_hwif_ct;
 }
 
 /**
@@ -76,7 +77,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 		BFA_IOC_FWIMG_MINSZ)
 		return true;
 
-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 
 	/**
@@ -84,7 +85,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 	 */
 	if (usecnt == 0) {
 		writel(1, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 		return true;
 	}
 
@@ -98,9 +99,9 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 	/**
 	 * Check if another driver with a different firmware is active
 	 */
-	bfa_ioc_fwver_get(ioc, &fwhdr);
-	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
+		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 		return false;
 	}
 
@@ -109,7 +110,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 	 */
 	usecnt++;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 	return true;
 }
 
@@ -134,14 +135,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
 	/**
 	 * decrement usage count
 	 */
-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 	BUG_ON(!(usecnt > 0));
 
 	usecnt--;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
 
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 }
 
 /**
@@ -302,9 +303,9 @@ static void
 bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
 {
 	if (ioc->cna) {
-		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 		writel(0, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 	}
 
 	/*
@@ -313,10 +314,10 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
 	 * will lock it instead of clearing it.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
+	bfa_nw_ioc_hw_sem_release(ioc);
 }
 
-enum bfa_status
+static enum bfa_status
 bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
 {
 	u32 pll_sclk, pll_fclk, r32;
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index 9d41ebf41cf4..f3034d6bda58 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -81,7 +81,7 @@ bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
 		/* Post the next entry, if needed */
 		if (to_post) {
 			mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
-			bfa_ioc_mbox_queue(&bna->device.ioc,
+			bfa_nw_ioc_mbox_queue(&bna->device.ioc,
 				&mb_qe->cmd);
 		}
 	} else {
@@ -107,7 +107,7 @@ bna_err_handler(struct bna *bna, u32 intr_status)
 		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
 	}
 
-	bfa_ioc_error_isr(&bna->device.ioc);
+	bfa_nw_ioc_error_isr(&bna->device.ioc);
 }
 
 void
@@ -118,7 +118,7 @@ bna_mbox_handler(struct bna *bna, u32 intr_status)
 		return;
 	}
 	if (BNA_IS_MBOX_INTR(intr_status))
-		bfa_ioc_mbox_isr(&bna->device.ioc);
+		bfa_nw_ioc_mbox_isr(&bna->device.ioc);
 }
 
 void
@@ -133,7 +133,7 @@ bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
 	bna->mbox_mod.msg_pending++;
 	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
 		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
-		bfa_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
+		bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
 		bna->mbox_mod.state = BNA_MBOX_POSTED;
 	} else {
 		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
@@ -180,7 +180,7 @@ bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
 void
 bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
 {
-	bfa_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
+	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
 	mbox_mod->state = BNA_MBOX_FREE;
 	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
 	INIT_LIST_HEAD(&mbox_mod->posted_q);
@@ -1289,7 +1289,7 @@ bna_port_mtu_set(struct bna_port *port, int mtu,
 void
 bna_port_mac_get(struct bna_port *port, mac_t *mac)
 {
-	*mac = bfa_ioc_get_mac(&port->bna->device.ioc);
+	*mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
 }
 
 /**
@@ -1427,7 +1427,7 @@ bna_device_sm_stopped(struct bna_device *device,
 	case DEVICE_E_ENABLE:
 		if (device->intr_type == BNA_INTR_T_MSIX)
 			bna_mbox_msix_idx_set(device);
-		bfa_ioc_enable(&device->ioc);
+		bfa_nw_ioc_enable(&device->ioc);
 		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
 		break;
 
@@ -1547,7 +1547,7 @@ bna_device_sm_port_stop_wait(struct bna_device *device,
 static void
 bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
 {
-	bfa_ioc_disable(&device->ioc);
+	bfa_nw_ioc_disable(&device->ioc);
 }
 
 static void
@@ -1655,12 +1655,12 @@ bna_device_init(struct bna_device *device, struct bna *bna,
 	 * 1. DMA memory for IOC attributes
 	 * 2. Kernel memory for FW trace
 	 */
-	bfa_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
-	bfa_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
+	bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
+	bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
 
 	BNA_GET_DMA_ADDR(
 		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
-	bfa_ioc_mem_claim(&device->ioc,
+	bfa_nw_ioc_mem_claim(&device->ioc,
 		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
 		dma);
 
@@ -1686,9 +1686,7 @@ bna_device_uninit(struct bna_device *device)
 {
 	bna_mbox_mod_uninit(&device->bna->mbox_mod);
 
-	bfa_cee_detach(&device->bna->cee);
-
-	bfa_ioc_detach(&device->ioc);
+	bfa_nw_ioc_detach(&device->ioc);
 
 	device->bna = NULL;
 }
@@ -1783,10 +1781,10 @@ bna_adv_device_init(struct bna_device *device, struct bna *bna,
 		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
 	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
 
-	bfa_cee_attach(&bna->cee, &device->ioc, bna);
-	bfa_cee_mem_claim(&bna->cee, kva, dma);
-	kva += bfa_cee_meminfo();
-	dma += bfa_cee_meminfo();
+	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
+	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+	kva += bfa_nw_cee_meminfo();
+	dma += bfa_nw_cee_meminfo();
 
 }
 
@@ -1800,7 +1798,7 @@ bna_adv_res_req(struct bna_res_info *res_info)
 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
-		bfa_cee_meminfo(), PAGE_SIZE);
+		bfa_nw_cee_meminfo(), PAGE_SIZE);
 
 	/* Virtual memory for retreiving fw_trc */
 	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
@@ -3333,7 +3331,7 @@ bna_res_req(struct bna_res_info *res_info)
 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
-		ALIGN(bfa_ioc_meminfo(), PAGE_SIZE);
+		ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
 
 	/* DMA memory for index segment of an IB */
 	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 491d148f88ae..cbc1d563a0c2 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -1365,7 +1365,7 @@ bnad_ioc_timeout(unsigned long data)
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_ioc_timeout((void *) &bnad->bna.device.ioc);
+	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1376,7 +1376,7 @@ bnad_ioc_hb_check(unsigned long data)
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_ioc_hb_check((void *) &bnad->bna.device.ioc);
+	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1387,7 +1387,7 @@ bnad_ioc_sem_timeout(unsigned long data)
 	unsigned long flags;
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
-	bfa_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
+	bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -3067,7 +3067,6 @@ bnad_pci_probe(struct pci_dev *pdev,
 	}
 	bnad = netdev_priv(netdev);
 
-
 	/*
 	 * PCI initialization
 	 * Output : using_dac = 1 for 64 bit DMA
@@ -3239,7 +3238,7 @@ bnad_module_init(void)
 
 	pr_info("Brocade 10G Ethernet driver\n");
 
-	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
+	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
 
 	err = pci_register_driver(&bnad_pci_driver);
 	if (err < 0) {
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 7e630f5e8e03..b337bd9bed29 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -276,7 +276,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 	if (ioc_attr) {
 		memset(ioc_attr, 0, sizeof(*ioc_attr));
 		spin_lock_irqsave(&bnad->bna_lock, flags);
-		bfa_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,