Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/bna/bfa_ioc.c     |    8
-rw-r--r--  drivers/net/bna/bfa_ioc.h     |    1
-rw-r--r--  drivers/net/bna/bfa_ioc_ct.c  |    2
-rw-r--r--  drivers/net/bna/bfa_sm.h      |    2
-rw-r--r--  drivers/net/bna/bna.h         |  108
-rw-r--r--  drivers/net/bna/bna_ctrl.c    |  559
-rw-r--r--  drivers/net/bna/bna_hw.h      |    1
-rw-r--r--  drivers/net/bna/bna_txrx.c    |  149
-rw-r--r--  drivers/net/bna/bnad.c        |   29
-rw-r--r--  drivers/net/bna/bnad.h        |    1
-rw-r--r--  drivers/net/bna/cna_fwimg.c   |    2
11 files changed, 170 insertions(+), 692 deletions(-)
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index 73493de98de5..e94e5aa97515 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -65,7 +65,7 @@
 	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
 	readl((__ioc)->ioc_regs.hfn_mbox_cmd))
 
-bool bfa_nw_auto_recover = true;
+static bool bfa_nw_auto_recover = true;
 
 /*
  * forward declarations
@@ -1276,12 +1276,6 @@ bfa_nw_ioc_auto_recover(bool auto_recover)
 	bfa_nw_auto_recover = auto_recover;
 }
 
-bool
-bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
-{
-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
-}
-
 static void
 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
 {
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index 7f0719e17efc..a73d84ec808c 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -271,7 +271,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
 void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
 
 void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
-bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
 
 void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
 void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 462857cbab9b..121cfd6d48b1 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -34,7 +34,7 @@ static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
 
-struct bfa_ioc_hwif nw_hwif_ct;
+static struct bfa_ioc_hwif nw_hwif_ct;
 
 /**
  * Called from bfa_ioc_attach() to map asic specific calls.
diff --git a/drivers/net/bna/bfa_sm.h b/drivers/net/bna/bfa_sm.h
index 1d3d975d6f68..46462c49b6f9 100644
--- a/drivers/net/bna/bfa_sm.h
+++ b/drivers/net/bna/bfa_sm.h
@@ -77,7 +77,7 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
 			((_fsm)->fsm == (bfa_fsm_t)(_state))
 
 static inline int
-bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
+bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
 {
 	int i = 0;
 
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
index 6a2b3291c190..df6676bbc84e 100644
--- a/drivers/net/bna/bna.h
+++ b/drivers/net/bna/bna.h
@@ -19,8 +19,7 @@
 #include "bfi_ll.h"
 #include "bna_types.h"
 
-extern u32 bna_dim_vector[][BNA_BIAS_T_MAX];
-extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
 
 /**
  *
@@ -344,9 +343,6 @@ do { \
  * BNA
  */
 
-/* Internal APIs */
-void bna_adv_res_req(struct bna_res_info *res_info);
-
 /* APIs for BNAD */
 void bna_res_req(struct bna_res_info *res_info);
 void bna_init(struct bna *bna, struct bnad *bnad,
@@ -354,7 +350,6 @@ void bna_init(struct bna *bna, struct bnad *bnad,
 		struct bna_res_info *res_info);
 void bna_uninit(struct bna *bna);
 void bna_stats_get(struct bna *bna);
-void bna_stats_clr(struct bna *bna);
 void bna_get_perm_mac(struct bna *bna, u8 *mac);
 
 /* APIs for Rx */
@@ -376,18 +371,6 @@ void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
  * DEVICE
  */
 
-/* Interanl APIs */
-void bna_adv_device_init(struct bna_device *device, struct bna *bna,
-			struct bna_res_info *res_info);
-
-/* APIs for BNA */
-void bna_device_init(struct bna_device *device, struct bna *bna,
-			struct bna_res_info *res_info);
-void bna_device_uninit(struct bna_device *device);
-void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
-int bna_device_status_get(struct bna_device *device);
-int bna_device_state_get(struct bna_device *device);
-
 /* APIs for BNAD */
 void bna_device_enable(struct bna_device *device);
 void bna_device_disable(struct bna_device *device,
@@ -397,12 +380,6 @@ void bna_device_disable(struct bna_device *device,
  * MBOX
  */
 
-/* APIs for DEVICE */
-void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
-void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
-void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
-void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
-
 /* APIs for PORT, TX, RX */
 void bna_mbox_handler(struct bna *bna, u32 intr_status);
 void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
@@ -411,17 +388,6 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
  * PORT
  */
 
-/* APIs for BNA */
-void bna_port_init(struct bna_port *port, struct bna *bna);
-void bna_port_uninit(struct bna_port *port);
-int bna_port_state_get(struct bna_port *port);
-int bna_llport_state_get(struct bna_llport *llport);
-
-/* APIs for DEVICE */
-void bna_port_start(struct bna_port *port);
-void bna_port_stop(struct bna_port *port);
-void bna_port_fail(struct bna_port *port);
-
 /* API for RX */
 int bna_port_mtu_get(struct bna_port *port);
 void bna_llport_admin_up(struct bna_llport *llport);
@@ -437,12 +403,6 @@ void bna_port_pause_config(struct bna_port *port,
 void bna_port_mtu_set(struct bna_port *port, int mtu,
 		void (*cbfn)(struct bnad *, enum bna_cb_status));
 void bna_port_mac_get(struct bna_port *port, mac_t *mac);
-void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
-void bna_port_linkcbfn_set(struct bna_port *port,
-		void (*linkcbfn)(struct bnad *,
-			enum bna_link_status));
-void bna_port_admin_up(struct bna_port *port);
-void bna_port_admin_down(struct bna_port *port);
 
 /* Callbacks for TX, RX */
 void bna_port_cb_tx_stopped(struct bna_port *port,
@@ -450,11 +410,6 @@ void bna_port_cb_tx_stopped(struct bna_port *port,
 void bna_port_cb_rx_stopped(struct bna_port *port,
 			enum bna_cb_status status);
 
-/* Callbacks for MBOX */
-void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
-			int status);
-void bna_port_cb_link_down(struct bna_port *port, int status);
-
 /**
  * IB
  */
@@ -464,25 +419,10 @@ void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
 		struct bna_res_info *res_info);
 void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
 
-/* APIs for TX, RX */
-struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
-			enum bna_intr_type intr_type, int vector);
-void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
-int bna_ib_reserve_idx(struct bna_ib *ib);
-void bna_ib_release_idx(struct bna_ib *ib, int idx);
-int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
-void bna_ib_start(struct bna_ib *ib);
-void bna_ib_stop(struct bna_ib *ib);
-void bna_ib_fail(struct bna_ib *ib);
-void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
-
 /**
  * TX MODULE AND TX
  */
 
-/* Internal APIs */
-void bna_tx_prio_changed(struct bna_tx *tx, int prio);
-
 /* APIs for BNA */
 void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
 		struct bna_res_info *res_info);
@@ -508,10 +448,6 @@ void bna_tx_enable(struct bna_tx *tx);
 void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
 		void (*cbfn)(void *, struct bna_tx *,
 			enum bna_cb_status));
-enum bna_cb_status
-bna_tx_prio_set(struct bna_tx *tx, int prio,
-		void (*cbfn)(struct bnad *, struct bna_tx *,
-			enum bna_cb_status));
 void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
 
 /**
@@ -564,35 +500,20 @@ void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
 		void (*cbfn)(void *, struct bna_rx *,
 			enum bna_cb_status));
 void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
-void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]);
+void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
 void bna_rx_dim_update(struct bna_ccb *ccb);
 enum bna_cb_status
 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
 		void (*cbfn)(struct bnad *, struct bna_rx *,
 			enum bna_cb_status));
 enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status));
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status));
-enum bna_cb_status
 bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
 		void (*cbfn)(struct bnad *, struct bna_rx *,
 			enum bna_cb_status));
 enum bna_cb_status
-bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status));
-enum bna_cb_status
 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
 		void (*cbfn)(struct bnad *, struct bna_rx *,
 			enum bna_cb_status));
-void bna_rx_mcast_delall(struct bna_rx *rx,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status));
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
 		enum bna_rxmode bitmask,
@@ -601,36 +522,12 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
 void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-void bna_rx_vlanfilter_disable(struct bna_rx *rx);
-void bna_rx_rss_enable(struct bna_rx *rx);
-void bna_rx_rss_disable(struct bna_rx *rx);
-void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
-void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
-		int nvectors);
 void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
 		void (*cbfn)(struct bnad *, struct bna_rx *,
 			enum bna_cb_status));
 void bna_rx_hds_disable(struct bna_rx *rx,
 		void (*cbfn)(struct bnad *, struct bna_rx *,
 			enum bna_cb_status));
-void bna_rx_receive_pause(struct bna_rx *rx,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status));
-void bna_rx_receive_resume(struct bna_rx *rx,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status));
-
-/* RxF APIs for RX */
-void bna_rxf_start(struct bna_rxf *rxf);
-void bna_rxf_stop(struct bna_rxf *rxf);
-void bna_rxf_fail(struct bna_rxf *rxf);
-void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
-		struct bna_rx_config *q_config);
-void bna_rxf_uninit(struct bna_rxf *rxf);
-
-/* Callback from RXF to RX */
-void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status);
-void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
 
 /**
  * BNAD
@@ -639,7 +536,6 @@ void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
 /* Callbacks for BNA */
 void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
 		struct bna_stats *stats);
-void bnad_cb_stats_clr(struct bnad *bnad);
 
 /* Callbacks for DEVICE */
 void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index ddd922f210c7..07b26598546e 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -19,6 +19,46 @@
 #include "bfa_sm.h"
 #include "bfa_wc.h"
 
+static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
+
+static void
+bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+			int status)
+{
+	int i;
+	u8 prio_map;
+
+	port->llport.link_status = BNA_LINK_UP;
+	if (aen->cee_linkup)
+		port->llport.link_status = BNA_CEE_UP;
+
+	/* Compute the priority */
+	prio_map = aen->prio_map;
+	if (prio_map) {
+		for (i = 0; i < 8; i++) {
+			if ((prio_map >> i) & 0x1)
+				break;
+		}
+		port->priority = i;
+	} else
+		port->priority = 0;
+
+	/* Dispatch events */
+	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
+	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
+	port->link_cbfn(port->bna->bnad, port->llport.link_status);
+}
+
+static void
+bna_port_cb_link_down(struct bna_port *port, int status)
+{
+	port->llport.link_status = BNA_LINK_DOWN;
+
+	/* Dispatch events */
+	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
+	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
+}
+
 /**
  * MBOX
  */
@@ -96,7 +136,7 @@ bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
 	bna_mbox_aen_callback(bna, msg);
 }
 
-void
+static void
 bna_err_handler(struct bna *bna, u32 intr_status)
 {
 	u32 init_halt;
@@ -140,7 +180,7 @@ bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
 	}
 }
 
-void
+static void
 bna_mbox_flush_q(struct bna *bna, struct list_head *q)
 {
 	struct bna_mbox_qe *mb_qe = NULL;
@@ -166,18 +206,18 @@ bna_mbox_flush_q(struct bna *bna, struct list_head *q)
 	bna->mbox_mod.state = BNA_MBOX_FREE;
 }
 
-void
+static void
 bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
 {
 }
 
-void
+static void
 bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
 {
 	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
 }
 
-void
+static void
 bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
 {
 	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
@@ -187,7 +227,7 @@ bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
 	mbox_mod->bna = bna;
 }
 
-void
+static void
 bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
 {
 	mbox_mod->bna = NULL;
@@ -538,7 +578,7 @@ bna_fw_cb_llport_down(void *arg, int status)
 	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
 }
 
-void
+static void
 bna_port_cb_llport_stopped(struct bna_port *port,
 				enum bna_cb_status status)
 {
@@ -591,7 +631,7 @@ bna_llport_fail(struct bna_llport *llport)
 	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
 }
 
-int
+static int
 bna_llport_state_get(struct bna_llport *llport)
 {
 	return bfa_sm_to_state(llport_sm_table, llport->fsm);
@@ -1109,7 +1149,7 @@ bna_port_cb_chld_stopped(void *arg)
 	bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
 }
 
-void
+static void
 bna_port_init(struct bna_port *port, struct bna *bna)
 {
 	port->bna = bna;
@@ -1137,7 +1177,7 @@ bna_port_init(struct bna_port *port, struct bna *bna)
 	bna_llport_init(&port->llport, bna);
 }
 
-void
+static void
 bna_port_uninit(struct bna_port *port)
 {
 	bna_llport_uninit(&port->llport);
@@ -1147,13 +1187,13 @@ bna_port_uninit(struct bna_port *port)
 	port->bna = NULL;
 }
 
-int
+static int
 bna_port_state_get(struct bna_port *port)
 {
 	return bfa_sm_to_state(port_sm_table, port->fsm);
 }
 
-void
+static void
 bna_port_start(struct bna_port *port)
 {
 	port->flags |= BNA_PORT_F_DEVICE_READY;
@@ -1161,7 +1201,7 @@ bna_port_start(struct bna_port *port)
 	bfa_fsm_send_event(port, PORT_E_START);
 }
 
-void
+static void
 bna_port_stop(struct bna_port *port)
 {
 	port->stop_cbfn = bna_device_cb_port_stopped;
@@ -1171,7 +1211,7 @@ bna_port_stop(struct bna_port *port)
 	bfa_fsm_send_event(port, PORT_E_STOP);
 }
 
-void
+static void
 bna_port_fail(struct bna_port *port)
 {
 	port->flags &= ~BNA_PORT_F_DEVICE_READY;
@@ -1190,44 +1230,6 @@ bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
 	bfa_wc_down(&port->chld_stop_wc);
 }
 
-void
-bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
-			int status)
-{
-	int i;
-	u8 prio_map;
-
-	port->llport.link_status = BNA_LINK_UP;
-	if (aen->cee_linkup)
-		port->llport.link_status = BNA_CEE_UP;
-
-	/* Compute the priority */
-	prio_map = aen->prio_map;
-	if (prio_map) {
-		for (i = 0; i < 8; i++) {
-			if ((prio_map >> i) & 0x1)
-				break;
-		}
-		port->priority = i;
-	} else
-		port->priority = 0;
-
-	/* Dispatch events */
-	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
-	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
-	port->link_cbfn(port->bna->bnad, port->llport.link_status);
-}
-
-void
-bna_port_cb_link_down(struct bna_port *port, int status)
-{
-	port->llport.link_status = BNA_LINK_DOWN;
-
-	/* Dispatch events */
-	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
-	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
-}
-
 int
 bna_port_mtu_get(struct bna_port *port)
 {
@@ -1293,54 +1295,6 @@ bna_port_mac_get(struct bna_port *port, mac_t *mac)
 }
 
 /**
- * Should be called only when port is disabled
- */
-void
-bna_port_type_set(struct bna_port *port, enum bna_port_type type)
-{
-	port->type = type;
-	port->llport.type = type;
-}
-
-/**
- * Should be called only when port is disabled
- */
-void
-bna_port_linkcbfn_set(struct bna_port *port,
-		void (*linkcbfn)(struct bnad *, enum bna_link_status))
-{
-	port->link_cbfn = linkcbfn;
-}
-
-void
-bna_port_admin_up(struct bna_port *port)
-{
-	struct bna_llport *llport = &port->llport;
-
-	if (llport->flags & BNA_LLPORT_F_ENABLED)
-		return;
-
-	llport->flags |= BNA_LLPORT_F_ENABLED;
-
-	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
-		bfa_fsm_send_event(llport, LLPORT_E_UP);
-}
-
-void
-bna_port_admin_down(struct bna_port *port)
-{
-	struct bna_llport *llport = &port->llport;
-
-	if (!(llport->flags & BNA_LLPORT_F_ENABLED))
-		return;
-
-	llport->flags &= ~BNA_LLPORT_F_ENABLED;
-
-	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
-		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
-}
-
-/**
  * DEVICE
  */
 #define enable_mbox_intr(_device)\
@@ -1357,7 +1311,7 @@ do {\
 	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
 } while (0)
 
-const struct bna_chip_regs_offset reg_offset[] =
+static const struct bna_chip_regs_offset reg_offset[] =
 {{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
 	HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
 {HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
@@ -1642,7 +1596,34 @@ static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
 	bna_device_cb_iocll_reset
 };
 
-void
+/* device */
+static void
+bna_adv_device_init(struct bna_device *device, struct bna *bna,
+			struct bna_res_info *res_info)
+{
+	u8 *kva;
+	u64 dma;
+
+	device->bna = bna;
+
+	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+
+	/**
+	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+	 * DMA memory.
+	 */
+	BNA_GET_DMA_ADDR(
+		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+
+	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
+	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+	kva += bfa_nw_cee_meminfo();
+	dma += bfa_nw_cee_meminfo();
+
+}
+
+static void
 bna_device_init(struct bna_device *device, struct bna *bna,
 		struct bna_res_info *res_info)
 {
@@ -1681,7 +1662,7 @@ bna_device_init(struct bna_device *device, struct bna *bna,
 	bfa_fsm_set_state(device, bna_device_sm_stopped);
 }
 
-void
+static void
 bna_device_uninit(struct bna_device *device)
 {
 	bna_mbox_mod_uninit(&device->bna->mbox_mod);
@@ -1691,7 +1672,7 @@ bna_device_uninit(struct bna_device *device)
 	device->bna = NULL;
 }
 
-void
+static void
 bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
 {
 	struct bna_device *device = (struct bna_device *)arg;
@@ -1699,7 +1680,7 @@ bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
 	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
 }
 
-int
+static int
 bna_device_status_get(struct bna_device *device)
 {
 	return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
@@ -1733,24 +1714,13 @@ bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
 	bfa_fsm_send_event(device, DEVICE_E_DISABLE);
 }
 
-int
+static int
 bna_device_state_get(struct bna_device *device)
 {
 	return bfa_sm_to_state(device_sm_table, device->fsm);
 }
 
-u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
-	{12, 20},
-	{10, 18},
-	{8, 16},
-	{6, 12},
-	{4, 8},
-	{3, 6},
-	{2, 4},
-	{1, 2},
-};
-
-u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
 	{12, 12},
 	{6, 10},
 	{5, 10},
@@ -1761,36 +1731,9 @@ u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
 	{1, 2},
 };
 
-/* device */
-void
-bna_adv_device_init(struct bna_device *device, struct bna *bna,
-			struct bna_res_info *res_info)
-{
-	u8 *kva;
-	u64 dma;
-
-	device->bna = bna;
-
-	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
-
-	/**
-	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
-	 * DMA memory.
-	 */
-	BNA_GET_DMA_ADDR(
-		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
-	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
-
-	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
-	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
-	kva += bfa_nw_cee_meminfo();
-	dma += bfa_nw_cee_meminfo();
-
-}
-
 /* utils */
 
-void
+static void
 bna_adv_res_req(struct bna_res_info *res_info)
 {
 	/* DMA memory for COMMON_MODULE */
@@ -2044,36 +1987,6 @@ bna_fw_stats_get(struct bna *bna)
 	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
 }
 
-static void
-bna_fw_cb_stats_clr(void *arg, int status)
-{
-	struct bna *bna = (struct bna *)arg;
-
-	bfa_q_qe_init(&bna->mbox_qe.qe);
-
-	memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
-	memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
-
-	bnad_cb_stats_clr(bna->bnad);
-}
-
-static void
-bna_fw_stats_clr(struct bna *bna)
-{
-	struct bfi_ll_stats_req ll_req;
-
-	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
-	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
-	ll_req.rxf_id_mask[0] = htonl(0xffffffff);
-	ll_req.rxf_id_mask[1] = htonl(0xffffffff);
-	ll_req.txf_id_mask[0] = htonl(0xffffffff);
-	ll_req.txf_id_mask[1] = htonl(0xffffffff);
-
-	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
-				bna_fw_cb_stats_clr, bna);
-	bna_mbox_send(bna, &bna->mbox_qe);
-}
-
 void
 bna_stats_get(struct bna *bna)
 {
@@ -2083,22 +1996,8 @@ bna_stats_get(struct bna *bna)
 	bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
 }
 
-void
-bna_stats_clr(struct bna *bna)
-{
-	if (bna_device_status_get(&bna->device))
-		bna_fw_stats_clr(bna);
-	else {
-		memset(&bna->stats.sw_stats, 0,
-				sizeof(struct bna_sw_stats));
-		memset(bna->stats.hw_stats, 0,
-				sizeof(struct bfi_ll_stats));
-		bnad_cb_stats_clr(bna->bnad);
-	}
-}
-
 /* IB */
-void
+static void
 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
 {
 	ib->ib_config.coalescing_timeo = coalescing_timeo;
@@ -2157,7 +2056,7 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
 	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
 }
 
-void
+static void
 __rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
 {
 	struct bna_rx_fndb_ram *rx_fndb_ram;
@@ -2553,7 +2452,7 @@ rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_promisc_enable(struct bna_rxf *rxf)
 {
 	struct bna *bna = rxf->rx->bna;
@@ -2584,7 +2483,7 @@ rxf_promisc_enable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_promisc_disable(struct bna_rxf *rxf)
 {
 	struct bna *bna = rxf->rx->bna;
@@ -2623,7 +2522,7 @@ rxf_promisc_disable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_default_enable(struct bna_rxf *rxf)
 {
 	struct bna *bna = rxf->rx->bna;
@@ -2654,7 +2553,7 @@ rxf_default_enable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_default_disable(struct bna_rxf *rxf)
 {
 	struct bna *bna = rxf->rx->bna;
@@ -2693,7 +2592,7 @@ rxf_default_disable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_allmulti_enable(struct bna_rxf *rxf)
 {
 	int ret = 0;
@@ -2721,7 +2620,7 @@ rxf_allmulti_enable(struct bna_rxf *rxf)
  * 0 = no h/w change
  * 1 = need h/w change
  */
-int
+static int
 rxf_allmulti_disable(struct bna_rxf *rxf)
 {
 	int ret = 0;
@@ -2746,159 +2645,6 @@ rxf_allmulti_disable(struct bna_rxf *rxf)
 }
 
 /* RxF <- bnad */
-void
-bna_rx_mcast_delall(struct bna_rx *rx,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-	struct list_head *qe;
-	struct bna_mac *mac;
-	int need_hw_config = 0;
-
-	/* Purge all entries from pending_add_q */
-	while (!list_empty(&rxf->mcast_pending_add_q)) {
-		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
-	}
-
-	/* Schedule all entries in active_q for deletion */
-	while (!list_empty(&rxf->mcast_active_q)) {
-		bfa_q_deq(&rxf->mcast_active_q, &qe);
-		mac = (struct bna_mac *)qe;
-		bfa_q_qe_init(&mac->qe);
-		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
-		need_hw_config = 1;
-	}
-
-	if (need_hw_config) {
-		rxf->cam_fltr_cbfn = cbfn;
-		rxf->cam_fltr_cbarg = rx->bna->bnad;
-		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-		return;
-	}
-
-	if (cbfn)
-		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-/* RxF <- Rx */
-void
-bna_rx_receive_resume(struct bna_rx *rx,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
-		rxf->oper_state_cbfn = cbfn;
-		rxf->oper_state_cbarg = rx->bna->bnad;
-		bfa_fsm_send_event(rxf, RXF_E_RESUME);
-	} else if (cbfn)
-		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-void
-bna_rx_receive_pause(struct bna_rx *rx,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
-		rxf->oper_state_cbfn = cbfn;
-		rxf->oper_state_cbarg = rx->bna->bnad;
-		bfa_fsm_send_event(rxf, RXF_E_PAUSE);
-	} else if (cbfn)
-		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-/* RxF <- bnad */
-enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-	struct list_head *qe;
-	struct bna_mac *mac;
-
-	/* Check if already added */
-	list_for_each(qe, &rxf->ucast_active_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			if (cbfn)
-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	/* Check if pending addition */
-	list_for_each(qe, &rxf->ucast_pending_add_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			if (cbfn)
-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
-	if (mac == NULL)
-		return BNA_CB_UCAST_CAM_FULL;
-	bfa_q_qe_init(&mac->qe);
-	memcpy(mac->addr, addr, ETH_ALEN);
-	list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
-
-	rxf->cam_fltr_cbfn = cbfn;
-	rxf->cam_fltr_cbarg = rx->bna->bnad;
-
-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-
-	return BNA_CB_SUCCESS;
-}
-
-/* RxF <- bnad */
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-	struct list_head *qe;
-	struct bna_mac *mac;
-
-	list_for_each(qe, &rxf->ucast_pending_add_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			list_del(qe);
-			bfa_q_qe_init(qe);
-			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
-			if (cbfn)
-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	list_for_each(qe, &rxf->ucast_active_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			list_del(qe);
-			bfa_q_qe_init(qe);
-			list_add_tail(qe, &rxf->ucast_pending_del_q);
-			rxf->cam_fltr_cbfn = cbfn;
-			rxf->cam_fltr_cbarg = rx->bna->bnad;
-			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	return BNA_CB_INVALID_MAC;
-}
-
-/* RxF <- bnad */
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
 		enum bna_rxmode bitmask,
@@ -2978,39 +2724,6 @@ err_return:
 	return BNA_CB_FAIL;
 }
 
-/* RxF <- bnad */
-void
-bna_rx_rss_enable(struct bna_rx *rx)
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-	rxf->rss_status = BNA_STATUS_T_ENABLED;
-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
-/* RxF <- bnad */
-void
-bna_rx_rss_disable(struct bna_rx *rx)
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-	rxf->rss_status = BNA_STATUS_T_DISABLED;
-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
-/* RxF <- bnad */
-void
-bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
-{
-	struct bna_rxf *rxf = &rx->rxf;
-	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-	rxf->rss_status = BNA_STATUS_T_ENABLED;
-	rxf->rss_cfg = *rss_config;
-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
 void
 /* RxF <- bnad */
 bna_rx_vlanfilter_enable(struct bna_rx *rx)
@@ -3024,68 +2737,8 @@ bna_rx_vlanfilter_enable(struct bna_rx *rx)
 	}
 }
 
-/* RxF <- bnad */
-void
-bna_rx_vlanfilter_disable(struct bna_rx *rx)
-{
-	struct bna_rxf *rxf = &rx->rxf;
-
-	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
-		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
-		rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
-		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-	}
-}
-
 /* Rx */
 
-struct bna_rxp *
-bna_rx_get_rxp(struct bna_rx *rx, int vector)
-{
-	struct bna_rxp *rxp;
-	struct list_head *qe;
-
-	list_for_each(qe, &rx->rxp_q) {
-		rxp = (struct bna_rxp *)qe;
-		if (rxp->vector == vector)
-			return rxp;
-	}
-	return NULL;
-}
-
-/*
- * bna_rx_rss_rit_set()
- * Sets the Q ids for the specified msi-x vectors in the RIT.
- * Maximum rit size supported is 64, which should be the max size of the
- * vectors array.
- */
-
-void
-bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
-{
-	int i;
-	struct bna_rxp *rxp;
-	struct bna_rxq *q0 = NULL, *q1 = NULL;
-	struct bna *bna;
-	struct bna_rxf *rxf;
-
-	/* Build the RIT contents for this RX */
-	bna = rx->bna;
-
-	rxf = &rx->rxf;
-	for (i = 0; i < nvectors; i++) {
-		rxp = bna_rx_get_rxp(rx, vectors[i]);
-
-		GET_RXQS(rxp, q0, q1);
-		rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
-		rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
-	}
-
-	rxf->rit_segment->rit_size = nvectors;
-
-	/* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
-}
-
 /* Rx <- bnad */
 void
 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
@@ -3102,7 +2755,7 @@ bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
 
 /* Rx <- bnad */
 void
-bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
+bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
 {
 	int i, j;
 
@@ -3165,22 +2818,6 @@ bna_rx_dim_update(struct bna_ccb *ccb)
 
 /* Tx */
 /* TX <- bnad */
-enum bna_cb_status
-bna_tx_prio_set(struct bna_tx *tx, int prio,
-		void (*cbfn)(struct bnad *, struct bna_tx *,
-			enum bna_cb_status))
-{
-	if (tx->flags & BNA_TX_F_PRIO_LOCK)
-		return BNA_CB_FAIL;
-	else {
-		tx->prio_change_cbfn = cbfn;
-		bna_tx_prio_changed(tx, prio);
-	}
-
-	return BNA_CB_SUCCESS;
-}
-
-/* TX <- bnad */
 void
 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
 {
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
index 67eb376c5c7e..806b224a4c63 100644
--- a/drivers/net/bna/bna_hw.h
+++ b/drivers/net/bna/bna_hw.h
@@ -1282,7 +1282,6 @@ struct bna_chip_regs_offset {
 	u32 fn_int_mask;
 	u32 msix_idx;
 };
-extern const struct bna_chip_regs_offset reg_offset[];
 
 struct bna_chip_regs {
 	void __iomem *page_addr;
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
index 890846d55502..ad93fdb0f427 100644
--- a/drivers/net/bna/bna_txrx.c
+++ b/drivers/net/bna/bna_txrx.c
@@ -195,7 +195,7 @@ bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
 	ib_mod->bna = NULL;
 }
 
-struct bna_ib *
+static struct bna_ib *
 bna_ib_get(struct bna_ib_mod *ib_mod,
 		enum bna_intr_type intr_type,
 		int vector)
@@ -240,7 +240,7 @@ bna_ib_get(struct bna_ib_mod *ib_mod,
 	return ib;
 }
 
-void
+static void
 bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
 {
 	bna_intr_put(ib_mod, ib->intr);
@@ -255,7 +255,7 @@ bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
 }
 
 /* Returns index offset - starting from 0 */
-int
+static int
 bna_ib_reserve_idx(struct bna_ib *ib)
 {
 	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
@@ -309,7 +309,7 @@ bna_ib_reserve_idx(struct bna_ib *ib)
 	return idx;
 }
 
-void
+static void
 bna_ib_release_idx(struct bna_ib *ib, int idx)
 {
 	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
@@ -356,7 +356,7 @@ bna_ib_release_idx(struct bna_ib *ib, int idx)
 	}
 }
 
-int
+static int
 bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
 {
 	if (ib->start_count)
@@ -374,7 +374,7 @@ bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
 	return 0;
 }
 
-void
+static void
 bna_ib_start(struct bna_ib *ib)
 {
 	struct bna_ib_blk_mem ib_cfg;
@@ -450,7 +450,7 @@ bna_ib_start(struct bna_ib *ib)
 	}
 }
 
-void
+static void
 bna_ib_stop(struct bna_ib *ib)
 {
 	u32 intx_mask;
@@ -468,7 +468,7 @@ bna_ib_stop(struct bna_ib *ib)
 	}
 }
 
-void
+static void
 bna_ib_fail(struct bna_ib *ib)
 {
 	ib->start_count = 0;
@@ -1394,7 +1394,7 @@ rxf_reset_packet_filter(struct bna_rxf *rxf)
 	rxf_reset_packet_filter_allmulti(rxf);
 }
 
-void
+static void
 bna_rxf_init(struct bna_rxf *rxf,
 		struct bna_rx *rx,
 		struct bna_rx_config *q_config)
@@ -1444,7 +1444,7 @@ bna_rxf_init(struct bna_rxf *rxf,
 	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 }
 
-void
+static void
 bna_rxf_uninit(struct bna_rxf *rxf)
 {
 	struct bna_mac *mac;
@@ -1476,7 +1476,18 @@ bna_rxf_uninit(struct bna_rxf *rxf)
 	rxf->rx = NULL;
 }
 
-void
+static void
+bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
+{
+	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
+	if (rx->rxf.rxf_id < 32)
+		rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
+	else
+		rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
+			1 << (rx->rxf.rxf_id - 32));
+}
+
+static void
 bna_rxf_start(struct bna_rxf *rxf)
 {
 	rxf->start_cbfn = bna_rx_cb_rxf_started;
@@ -1485,7 +1496,18 @@ bna_rxf_start(struct bna_rxf *rxf)
 	bfa_fsm_send_event(rxf, RXF_E_START);
 }
 
-void
+static void
+bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
+{
+	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
+	if (rx->rxf.rxf_id < 32)
+		rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
+	else
+		rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
+			1 << (rx->rxf.rxf_id - 32);
+}
+
+static void
 bna_rxf_stop(struct bna_rxf *rxf)
 {
 	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
@@ -1493,7 +1515,7 @@ bna_rxf_stop(struct bna_rxf *rxf)
 	bfa_fsm_send_event(rxf, RXF_E_STOP);
 }
 
-void
+static void
 bna_rxf_fail(struct bna_rxf *rxf)
 {
 	rxf->rxf_flags |= BNA_RXF_FL_FAILED;
@@ -1576,43 +1598,6 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
 }
 
 enum bna_cb_status
-bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			enum bna_cb_status))
-{
-	struct bna_rxf *rxf = &rx->rxf;
-	struct list_head *qe;
-	struct bna_mac *mac;
-
-	list_for_each(qe, &rxf->mcast_pending_add_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			list_del(qe);
-			bfa_q_qe_init(qe);
-			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
-			if (cbfn)
-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	list_for_each(qe, &rxf->mcast_active_q) {
-		mac = (struct bna_mac *)qe;
-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-			list_del(qe);
-			bfa_q_qe_init(qe);
-			list_add_tail(qe, &rxf->mcast_pending_del_q);
-			rxf->cam_fltr_cbfn = cbfn;
-			rxf->cam_fltr_cbarg = rx->bna->bnad;
-			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-			return BNA_CB_SUCCESS;
-		}
-	}
-
-	return BNA_CB_INVALID_MAC;
-}
-
-enum bna_cb_status
 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
 		void (*cbfn)(struct bnad *, struct bna_rx *,
 			enum bna_cb_status))
@@ -1862,7 +1847,7 @@ bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
 bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
 			struct bna_rx, enum bna_rx_event);
 
-static struct bfa_sm_table rx_sm_table[] = {
+static const struct bfa_sm_table rx_sm_table[] = {
 	{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
 	{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
 	{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
@@ -2247,7 +2232,7 @@ bna_rit_create(struct bna_rx *rx)
 	}
 }
 
-int
+static int
 _rx_can_satisfy(struct bna_rx_mod *rx_mod,
 		struct bna_rx_config *rx_cfg)
 {
@@ -2272,7 +2257,7 @@ _rx_can_satisfy(struct bna_rx_mod *rx_mod,
 	return 1;
 }
 
-struct bna_rxq *
+static struct bna_rxq *
 _get_free_rxq(struct bna_rx_mod *rx_mod)
 {
 	struct bna_rxq *rxq = NULL;
@@ -2286,7 +2271,7 @@ _get_free_rxq(struct bna_rx_mod *rx_mod)
 	return rxq;
 }
 
-void
+static void
 _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
 {
 	bfa_q_qe_init(&rxq->qe);
@@ -2294,7 +2279,7 @@ _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
 	rx_mod->rxq_free_count++;
 }
 
-struct bna_rxp *
+static struct bna_rxp *
 _get_free_rxp(struct bna_rx_mod *rx_mod)
 {
 	struct list_head *qe = NULL;
@@ -2310,7 +2295,7 @@ _get_free_rxp(struct bna_rx_mod *rx_mod)
 	return rxp;
 }
 
-void
+static void
 _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
 {
 	bfa_q_qe_init(&rxp->qe);
@@ -2318,7 +2303,7 @@ _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
 	rx_mod->rxp_free_count++;
 }
 
-struct bna_rx *
+static struct bna_rx *
 _get_free_rx(struct bna_rx_mod *rx_mod)
 {
 	struct list_head *qe = NULL;
@@ -2336,7 +2321,7 @@ _get_free_rx(struct bna_rx_mod *rx_mod)
 	return rx;
 }
 
-void
+static void
 _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
 {
 	bfa_q_qe_init(&rx->qe);
@@ -2344,7 +2329,7 @@ _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
 	rx_mod->rx_free_count++;
 }
 
-void
+static void
 _rx_init(struct bna_rx *rx, struct bna *bna)
 {
 	rx->bna = bna;
@@ -2360,7 +2345,7 @@ _rx_init(struct bna_rx *rx, struct bna *bna)
 	rx->stop_cbarg = NULL;
 }
 
-void
+static void
 _rxp_add_rxqs(struct bna_rxp *rxp,
 		struct bna_rxq *q0,
 		struct bna_rxq *q1)
@@ -2383,7 +2368,7 @@ _rxp_add_rxqs(struct bna_rxp *rxp,
 	}
 }
 
-void
+static void
 _rxq_qpt_init(struct bna_rxq *rxq,
 		struct bna_rxp *rxp,
 		u32 page_count,
@@ -2412,7 +2397,7 @@ _rxq_qpt_init(struct bna_rxq *rxq,
 	}
 }
 
-void
+static void
 _rxp_cqpt_setup(struct bna_rxp *rxp,
 		u32 page_count,
 		u32 page_size,
@@ -2441,13 +2426,13 @@ _rxp_cqpt_setup(struct bna_rxp *rxp,
 	}
 }
 
-void
+static void
 _rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
 {
 	list_add_tail(&rxp->qe, &rx->rxp_q);
 }
 
-void
+static void
 _init_rxmod_queues(struct bna_rx_mod *rx_mod)
 {
 	INIT_LIST_HEAD(&rx_mod->rx_free_q);
@@ -2460,7 +2445,7 @@ _init_rxmod_queues(struct bna_rx_mod *rx_mod)
 	rx_mod->rxp_free_count = 0;
 }
 
-void
+static void
 _rx_ctor(struct bna_rx *rx, int id)
 {
 	bfa_q_qe_init(&rx->qe);
@@ -2492,7 +2477,7 @@ bna_rx_cb_rxq_stopped_all(void *arg)
 	bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
 }
 
-void
+static void
 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
 		enum bna_cb_status status)
 {
@@ -2501,7 +2486,7 @@ bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
 	bfa_wc_down(&rx_mod->rx_stop_wc);
 }
 
-void
+static void
 bna_rx_mod_cb_rx_stopped_all(void *arg)
 {
 	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
@@ -2511,7 +2496,7 @@ bna_rx_mod_cb_rx_stopped_all(void *arg)
 	rx_mod->stop_cbfn = NULL;
 }
 
-void
+static void
 bna_rx_start(struct bna_rx *rx)
 {
 	rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
@@ -2519,7 +2504,7 @@ bna_rx_start(struct bna_rx *rx)
 	bfa_fsm_send_event(rx, RX_E_START);
 }
 
-void
+static void
 bna_rx_stop(struct bna_rx *rx)
 {
 	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
@@ -2532,7 +2517,7 @@ bna_rx_stop(struct bna_rx *rx)
2532 } 2517 }
2533} 2518}
2534 2519
2535void 2520static void
2536bna_rx_fail(struct bna_rx *rx) 2521bna_rx_fail(struct bna_rx *rx)
2537{ 2522{
2538 /* Indicate port is not enabled, and failed */ 2523 /* Indicate port is not enabled, and failed */
@@ -2542,28 +2527,6 @@ bna_rx_fail(struct bna_rx *rx)
2542} 2527}
2543 2528
2544void 2529void
2545bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
2546{
2547 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
2548 if (rx->rxf.rxf_id < 32)
2549 rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
2550 else
2551 rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
2552 1 << (rx->rxf.rxf_id - 32));
2553}
2554
2555void
2556bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
2557{
2558 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
2559 if (rx->rxf.rxf_id < 32)
2560 rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
2561 else
2562 rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
2563 1 << (rx->rxf.rxf_id - 32);
2564}
2565
2566void
2567bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) 2530bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2568{ 2531{
2569 struct bna_rx *rx; 2532 struct bna_rx *rx;
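The bna_rx_cb_rxf_started()/bna_rx_cb_rxf_stopped() callbacks deleted in the hunk above kept a per-RXF id bitmap split across two 32-bit words (ids 0-31 in word 0, ids 32-63 in word 1). A minimal sketch of that bookkeeping follows, for reference only; the helper names are illustrative rather than part of the driver, and the clear path writes the mask with explicit parentheses.

#include <linux/types.h>

/* Illustrative only: set/clear one RXF id in a two-word bitmap, the
 * state the deleted callbacks maintained. */
static void rxf_bmap_set(u32 bmap[2], unsigned int rxf_id)
{
	bmap[rxf_id / 32] |= (u32)1 << (rxf_id % 32);
}

static void rxf_bmap_clear(u32 bmap[2], unsigned int rxf_id)
{
	bmap[rxf_id / 32] &= ~((u32)1 << (rxf_id % 32));
}

The patch removes the callbacks outright, presumably because nothing in the driver consumed rxf_bmap, rather than keeping this bookkeeping around.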
@@ -3731,7 +3694,7 @@ bna_tx_fail(struct bna_tx *tx)
3731 bfa_fsm_send_event(tx, TX_E_FAIL); 3694 bfa_fsm_send_event(tx, TX_E_FAIL);
3732} 3695}
3733 3696
3734void 3697static void
3735bna_tx_prio_changed(struct bna_tx *tx, int prio) 3698bna_tx_prio_changed(struct bna_tx *tx, int prio)
3736{ 3699{
3737 struct bna_txq *txq; 3700 struct bna_txq *txq;
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 7210c34d2d5b..74c64d6c8801 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -28,7 +28,7 @@
28#include "bna.h" 28#include "bna.h"
29#include "cna.h" 29#include "cna.h"
30 30
31DEFINE_MUTEX(bnad_fwimg_mutex); 31static DEFINE_MUTEX(bnad_fwimg_mutex);
32 32
33/* 33/*
34 * Module params 34 * Module params
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46 */ 46 */
47u32 bnad_rxqs_per_cq = 2; 47u32 bnad_rxqs_per_cq = 2;
48 48
49const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 49static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
50 50
51/* 51/*
52 * Local MACROS 52 * Local MACROS
@@ -687,7 +687,7 @@ bnad_enable_mbox_irq(struct bnad *bnad)
687 * Called with bnad->bna_lock held b'cos of 687 * Called with bnad->bna_lock held b'cos of
688 * bnad->cfg_flags access. 688 * bnad->cfg_flags access.
689 */ 689 */
690void 690static void
691bnad_disable_mbox_irq(struct bnad *bnad) 691bnad_disable_mbox_irq(struct bnad *bnad)
692{ 692{
693 int irq = BNAD_GET_MBOX_IRQ(bnad); 693 int irq = BNAD_GET_MBOX_IRQ(bnad);
@@ -956,11 +956,6 @@ bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
956 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); 956 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
957} 957}
958 958
959void
960bnad_cb_stats_clr(struct bnad *bnad)
961{
962}
963
964/* Resource allocation, free functions */ 959/* Resource allocation, free functions */
965 960
966static void 961static void
@@ -1111,8 +1106,10 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
1111 } 1106 }
1112 1107
1113 spin_lock_irqsave(&bnad->bna_lock, flags); 1108 spin_lock_irqsave(&bnad->bna_lock, flags);
1109
1114 if (bnad->cfg_flags & BNAD_CF_MSIX) 1110 if (bnad->cfg_flags & BNAD_CF_MSIX)
1115 disable_irq_nosync(irq); 1111 disable_irq_nosync(irq);
1112
1116 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1113 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1117 return 0; 1114 return 0;
1118} 1115}
@@ -2243,7 +2240,6 @@ static void
2243bnad_enable_msix(struct bnad *bnad) 2240bnad_enable_msix(struct bnad *bnad)
2244{ 2241{
2245 int i, ret; 2242 int i, ret;
2246 u32 tot_msix_num;
2247 unsigned long flags; 2243 unsigned long flags;
2248 2244
2249 spin_lock_irqsave(&bnad->bna_lock, flags); 2245 spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -2256,18 +2252,16 @@ bnad_enable_msix(struct bnad *bnad)
2256 if (bnad->msix_table) 2252 if (bnad->msix_table)
2257 return; 2253 return;
2258 2254
2259 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2260
2261 bnad->msix_table = 2255 bnad->msix_table =
2262 kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL); 2256 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2263 2257
2264 if (!bnad->msix_table) 2258 if (!bnad->msix_table)
2265 goto intx_mode; 2259 goto intx_mode;
2266 2260
2267 for (i = 0; i < tot_msix_num; i++) 2261 for (i = 0; i < bnad->msix_num; i++)
2268 bnad->msix_table[i].entry = i; 2262 bnad->msix_table[i].entry = i;
2269 2263
2270 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num); 2264 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2271 if (ret > 0) { 2265 if (ret > 0) {
2272 /* Not enough MSI-X vectors. */ 2266 /* Not enough MSI-X vectors. */
2273 2267
@@ -2280,12 +2274,11 @@ bnad_enable_msix(struct bnad *bnad)
2280 + (bnad->num_rx 2274 + (bnad->num_rx
2281 * bnad->num_rxp_per_rx) + 2275 * bnad->num_rxp_per_rx) +
2282 BNAD_MAILBOX_MSIX_VECTORS; 2276 BNAD_MAILBOX_MSIX_VECTORS;
2283 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2284 2277
2285 /* Try once more with adjusted numbers */ 2278 /* Try once more with adjusted numbers */
2286 /* If this fails, fall back to INTx */ 2279 /* If this fails, fall back to INTx */
2287 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, 2280 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2288 tot_msix_num); 2281 bnad->msix_num);
2289 if (ret) 2282 if (ret)
2290 goto intx_mode; 2283 goto intx_mode;
2291 2284
@@ -2298,7 +2291,6 @@ intx_mode:
2298 kfree(bnad->msix_table); 2291 kfree(bnad->msix_table);
2299 bnad->msix_table = NULL; 2292 bnad->msix_table = NULL;
2300 bnad->msix_num = 0; 2293 bnad->msix_num = 0;
2301 bnad->msix_diag_num = 0;
2302 spin_lock_irqsave(&bnad->bna_lock, flags); 2294 spin_lock_irqsave(&bnad->bna_lock, flags);
2303 bnad->cfg_flags &= ~BNAD_CF_MSIX; 2295 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2304 bnad_q_num_init(bnad); 2296 bnad_q_num_init(bnad);
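The two hunks above simplify bnad_enable_msix() to size the vector table by bnad->msix_num alone now that msix_diag_num is gone. A hedged sketch of the overall pattern follows; it uses the pci_enable_msix() interface of the kernels this patch targets, and the function and parameter names are invented for illustration.

#include <linux/pci.h>
#include <linux/slab.h>

/*
 * Sketch only: request one MSI-X vector per TxQ and per RxP plus the
 * mailbox vector; on a partial grant retry once with a smaller layout,
 * otherwise report failure so the caller can fall back to INTx.
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry **table,
			       int nvec, int fallback_nvec)
{
	struct msix_entry *entries;
	int i, ret;

	entries = kcalloc(nvec, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;	/* vector index == table index */

	ret = pci_enable_msix(pdev, entries, nvec);
	if (ret > 0) {
		/* Fewer vectors available than requested: retry once with
		 * the reduced (default queue) layout, as the driver does. */
		ret = pci_enable_msix(pdev, entries, fallback_nvec);
	}
	if (ret) {
		kfree(entries);
		return ret;	/* caller clears BNAD_CF_MSIX, uses INTx */
	}

	*table = entries;
	return 0;
}

With msix_diag_num removed, nvec here corresponds to (num_tx * num_txq_per_tx) + (num_rx * num_rxp_per_rx) + BNAD_MAILBOX_MSIX_VECTORS, matching the bnad_init() hunk below.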
@@ -2946,7 +2938,6 @@ bnad_init(struct bnad *bnad,
2946 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + 2938 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2947 (bnad->num_rx * bnad->num_rxp_per_rx) + 2939 (bnad->num_rx * bnad->num_rxp_per_rx) +
2948 BNAD_MAILBOX_MSIX_VECTORS; 2940 BNAD_MAILBOX_MSIX_VECTORS;
2949 bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */
2950 2941
2951 bnad->txq_depth = BNAD_TXQ_DEPTH; 2942 bnad->txq_depth = BNAD_TXQ_DEPTH;
2952 bnad->rxq_depth = BNAD_RXQ_DEPTH; 2943 bnad->rxq_depth = BNAD_RXQ_DEPTH;
@@ -3217,7 +3208,7 @@ bnad_pci_remove(struct pci_dev *pdev)
3217 free_netdev(netdev); 3208 free_netdev(netdev);
3218} 3209}
3219 3210
3220const struct pci_device_id bnad_pci_id_table[] = { 3211static const struct pci_device_id bnad_pci_id_table[] = {
3221 { 3212 {
3222 PCI_DEVICE(PCI_VENDOR_ID_BROCADE, 3213 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3223 PCI_DEVICE_ID_BROCADE_CT), 3214 PCI_DEVICE_ID_BROCADE_CT),
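Making bnad_pci_id_table static, as the last hunk above does, follows the usual PCI driver idiom: MODULE_DEVICE_TABLE() only needs the table within the same translation unit to emit module alias information, so external linkage is unnecessary. A small illustrative fragment of that idiom, with a hypothetical table name:

#include <linux/module.h>
#include <linux/pci.h>

/* Illustrative only: a file-local PCI id table still feeds module alias
 * generation through MODULE_DEVICE_TABLE(). The ids match the entry
 * shown in the hunk above. */
static const struct pci_device_id example_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROCADE, PCI_DEVICE_ID_BROCADE_CT) },
	{ 0, },	/* terminator */
};
MODULE_DEVICE_TABLE(pci, example_pci_id_table);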
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index ee377888b905..ebc3a9078642 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -248,7 +248,6 @@ struct bnad {
248 u64 mmio_len; 248 u64 mmio_len;
249 249
250 u32 msix_num; 250 u32 msix_num;
251 u32 msix_diag_num;
252 struct msix_entry *msix_table; 251 struct msix_entry *msix_table;
253 252
254 struct mutex conf_mutex; 253 struct mutex conf_mutex;
diff --git a/drivers/net/bna/cna_fwimg.c b/drivers/net/bna/cna_fwimg.c
index 0bd1d3790a27..e8f4ecd9ebb5 100644
--- a/drivers/net/bna/cna_fwimg.c
+++ b/drivers/net/bna/cna_fwimg.c
@@ -22,7 +22,7 @@ const struct firmware *bfi_fw;
22static u32 *bfi_image_ct_cna; 22static u32 *bfi_image_ct_cna;
23static u32 bfi_image_ct_cna_size; 23static u32 bfi_image_ct_cna_size;
24 24
25u32 * 25static u32 *
26cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, 26cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
27 u32 *bfi_image_size, char *fw_name) 27 u32 *bfi_image_size, char *fw_name)
28{ 28{