author	Rasesh Mody <rmody@brocade.com>	2011-08-08 12:21:39 -0400
committer	David S. Miller <davem@davemloft.net>	2011-08-11 10:30:13 -0400
commit	078086f3c17fae8af6c077153773c4a10392ffbf (patch)
tree	009c110c4f735f15eb149b49c8290e1d9e5c424a /drivers
parent	6849c6b30772bb08ed52c3ec00e8245e70e25a2b (diff)
bna: ENET and Tx Rx Redesign Enablement
Change details:

This patch contains additional structure and function definition changes
that are required to enable the new msgq/enet/txrx redesign introduced
by the previous 4 patches.

 - Structure and function definition changes to header files as a result
   of the Ethport, Enet, IOCEth, Tx, Rx redesign.

 - Ethtool changes to use the new enet functions and definitions.

 - Set the number of Tx and Rx queues based on the underlying hardware.
   Define separate macros for the maximum and supported numbers of Tx
   and Rx queues based on the underlying hardware. Take the VLAN header
   into account for the MTU calculation (a frame-size sketch follows
   this message). Default to INTx mode when pci_enable_msix() fails.
   Set a bit in the Rx poll routine; check and wait for that bit to be
   cleared in the cleanup routine before proceeding.

 - The Tx and Rx coalescing settings are programmed in steps of 5 us.
   Values that are not divisible by 5 are rounded down to the next lower
   number, which was causing values of 1 to 4 to be rounded to 0, an
   invalid setting (a rounding sketch follows this message). When
   creating Rx and Tx objects, we currently assign the default values of
   the Rx and Tx coalescing_timeo. If these values are changed in the
   driver, the change is lost during operations such as an MTU change.
   To avoid that, pass the configured value of coalescing_timeo before
   Rx and Tx object creation. Fix bnad_tx_coalescing_timeo_set() so it
   applies to all the Tx objects.

 - Reorganize the uninitialization path in case of pci_probe failure.

 - Hardware clock setup changes to pass the ASIC generation, port modes
   and ASIC mode to firmware as part of the firmware boot parameters.

 - FW mailbox interface changes to define ASIC-specific mailbox
   interfaces. The h/w mailbox interfaces take 8-bit FIDs and a 2-bit
   port id for the owner. Cleaned up mailbox definitions and usage for
   new and old HW. Eliminated usage of the ASIC ID. MSI-X vector
   assignment and programming is done by firmware. Fixed host offsets
   for the CPE/RME queue registers.

 - Implement a polling mechanism for FW ready that replaces the current
   interrupt-based FW READY method. The timer-based poll routine in the
   IOC queries the ioc_fwstate register to see if there is a state
   change in FW, and sends the READY event. Removed the infrastructure
   needed to support the mbox READY event from FW, as well as the
   corresponding IOC code.

 - Move FW init to HW init. Handle the case where the PCI mapping goes
   away while the IOCPF state machine is waiting for the semaphore.

 - Add an IOC mbox callback to the client indicating that the command
   has been sent.

Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
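A note on the VLAN-header/MTU point above: the on-wire frame a queue
must accommodate is the MTU plus the Ethernet header, the 802.1Q tag,
and the FCS. A minimal standalone sketch, assuming standard Ethernet
constants; the helper name mtu_to_frame_size() is illustrative, not the
bna driver's actual identifier:

#include <stdio.h>

/* Standard Ethernet overhead; VLAN_HLEN is the 802.1Q tag. */
#define ETH_HLEN	14	/* dest mac + src mac + ethertype */
#define ETH_FCS_LEN	4	/* frame check sequence */
#define VLAN_HLEN	4	/* 802.1Q tag inserted after src mac */

/* Largest frame a queue must hold for a given MTU, VLAN tag included. */
static unsigned int mtu_to_frame_size(unsigned int mtu)
{
	return mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
}

int main(void)
{
	printf("MTU 1500 -> frame size %u bytes\n", mtu_to_frame_size(1500));
	return 0;
}

Sizing buffers from the bare MTU instead would silently drop
maximum-size tagged frames.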
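And on the coalescing rounding described above: the hardware programs
Tx/Rx coalescing timeouts in 5 us steps, so plain integer division
truncates requests of 1 to 4 us down to 0, which the hardware rejects.
A minimal sketch of one way to keep small non-zero requests valid by
rounding up and clamping; the constants and timeo_usecs_to_hw() are
hypothetical names, not the driver's real API:

#include <stdio.h>

/* Hypothetical constants for illustration only. */
#define COALESCING_TIMER_UNIT	5	/* h/w granularity, usecs */
#define COALESCING_TIMEO_MAX	0xFF	/* widest value the h/w field holds */

/*
 * Convert a requested timeout in usecs to hardware units. Truncating
 * division maps 1..4 to the invalid value 0; rounding up maps them to 1.
 */
static unsigned int timeo_usecs_to_hw(unsigned int usecs)
{
	unsigned int hw = (usecs + COALESCING_TIMER_UNIT - 1) /
			  COALESCING_TIMER_UNIT;	/* round up */

	return hw > COALESCING_TIMEO_MAX ? COALESCING_TIMEO_MAX : hw;
}

int main(void)
{
	unsigned int usecs;

	for (usecs = 1; usecs <= 11; usecs += 2)
		printf("%2u us -> %u hw units\n", usecs, timeo_usecs_to_hw(usecs));
	return 0;
}

With this rounding a requested 3 us becomes 1 hardware unit (5 us)
instead of collapsing to 0.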
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/brocade/bna/Makefile            |   2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_defs.h          |  25
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h |  20
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c           | 389
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.h           |  36
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c        |  41
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi.h               |  74
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna.h               | 224
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h         | 494
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c              | 643
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h              |  36
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c      |  65
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna.h               |  31
13 files changed, 1337 insertions(+), 743 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index d501f520b0bc..74d3abca1960 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_BNA) += bna.o
 
-bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
+bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
 bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
 bna-objs += cna_fwimg.o
 
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index b080b3698f48..205b92b3709c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -124,6 +124,7 @@ enum bfa_ioc_state {
 	BFA_IOC_DISABLED	= 10,	/*!< IOC is disabled */
 	BFA_IOC_FWMISMATCH	= 11,	/*!< IOC f/w different from drivers */
 	BFA_IOC_ENABLING	= 12,	/*!< IOC is being enabled */
+	BFA_IOC_HWFAIL		= 13,	/*!< PCI mapping doesn't exist */
 };
 
 /**
@@ -179,8 +180,19 @@ struct bfa_ioc_attr {
 	struct bfa_adapter_attr adapter_attr;	/*!< HBA attributes */
 	struct bfa_ioc_driver_attr driver_attr;	/*!< driver attr */
 	struct bfa_ioc_pci_attr pci_attr;
 	u8			port_id;	/*!< port number */
-	u8			rsvd[7];	/*!< 64bit align */
+	u8			port_mode;	/*!< enum bfa_mode */
+	u8			cap_bm;		/*!< capability */
+	u8			port_mode_cfg;	/*!< enum bfa_mode */
+	u8			rsvd[4];	/*!< 64bit align */
+};
+
+/**
+ * Adapter capability mask definition
+ */
+enum {
+	BFA_CM_HBA	= 0x01,
+	BFA_CM_CNA	= 0x02,
 };
 
 /**
@@ -228,7 +240,7 @@ struct bfa_mfg_block {
 	mac_t		mfg_mac;	/*!< mac address */
 	u8		num_mac;	/*!< number of mac addresses */
 	u8		rsv2;
-	u32		mfg_type;	/*!< card type */
+	u32		card_type;	/*!< card type */
 	u8		rsv3[108];
 	u8		md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
 };
@@ -242,5 +254,12 @@ struct bfa_mfg_block {
 #define bfa_asic_id_ct(devid)			\
 	((devid) == PCI_DEVICE_ID_BROCADE_CT ||	\
 	 (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
+#define bfa_asic_id_ctc(devid) (bfa_asic_id_ct(devid))
+
+enum bfa_mode {
+	BFA_MODE_HBA		= 1,
+	BFA_MODE_CNA		= 2,
+	BFA_MODE_NIC		= 3
+};
 
 #endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 885ef3afdd4e..f84d8f674812 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -19,11 +19,12 @@
 #define __BFA_DEFS_MFG_COMM_H__
 
 #include "cna.h"
+#include "bfa_defs.h"
 
 /**
  * Manufacturing block version
  */
-#define BFA_MFG_VERSION				2
+#define BFA_MFG_VERSION				3
 #define BFA_MFG_VERSION_UNINIT			0xFF
 
 /**
@@ -95,27 +96,14 @@ enum {
 	 (type) == BFA_MFG_TYPE_CNA10P1 ||	\
 	 bfa_mfg_is_mezz(type)))
 
-#define bfa_mfg_adapter_prop_init_flash(card_type, prop)	\
+#define bfa_mfg_adapter_prop_init_flash_ct(mfgblk, prop)	\
 do {								\
-	switch ((card_type)) {					\
-	case BFA_MFG_TYPE_FC8P2:				\
+	switch ((mfgblk)->card_type) {				\
 	case BFA_MFG_TYPE_JAYHAWK:				\
 	case BFA_MFG_TYPE_ASTRA:				\
 		(prop) = BFI_ADAPTER_SETP(NPORTS, 2) |		\
 			BFI_ADAPTER_SETP(SPEED, 8);		\
 		break;						\
-	case BFA_MFG_TYPE_FC8P1:				\
-		(prop) = BFI_ADAPTER_SETP(NPORTS, 1) |		\
-			BFI_ADAPTER_SETP(SPEED, 8);		\
-		break;						\
-	case BFA_MFG_TYPE_FC4P2:				\
-		(prop) = BFI_ADAPTER_SETP(NPORTS, 2) |		\
-			BFI_ADAPTER_SETP(SPEED, 4);		\
-		break;						\
-	case BFA_MFG_TYPE_FC4P1:				\
-		(prop) = BFI_ADAPTER_SETP(NPORTS, 1) |		\
-			BFI_ADAPTER_SETP(SPEED, 4);		\
-		break;						\
 	case BFA_MFG_TYPE_CNA10P2:				\
 	case BFA_MFG_TYPE_WANCHESE:				\
 	case BFA_MFG_TYPE_LIGHTNING_P0:				\
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 2d5c4fd778ee..029fb527e80d 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -62,6 +62,7 @@ static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
@@ -78,8 +79,8 @@ static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
-static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
 static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
 			u32 boot_param);
@@ -108,11 +109,11 @@ enum ioc_event {
 	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
 	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
 	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
-	IOC_E_INITFAILED	= 8,	/*!< failure notice by iocpf sm	*/
-	IOC_E_PFFAILED		= 9,	/*!< failure notice by iocpf sm	*/
-	IOC_E_HBFAIL		= 10,	/*!< heartbeat failure		*/
-	IOC_E_HWERROR		= 11,	/*!< hardware error interrupt	*/
-	IOC_E_TIMEOUT		= 12,	/*!< timeout			*/
+	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
+	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
+	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
+	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
+	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
@@ -124,6 +125,7 @@ bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
 
 static struct bfa_sm_table ioc_sm_table[] = {
 	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -135,6 +137,7 @@ static struct bfa_sm_table ioc_sm_table[] = {
 	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
 };
 
 /**
@@ -166,6 +169,7 @@ enum iocpf_event {
 	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
 	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
 	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
+	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
 };
 
 /**
@@ -300,11 +304,16 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
 		/* !!! fall through !!! */
 	case IOC_E_HWERROR:
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 		if (event != IOC_E_PFFAILED)
 			bfa_iocpf_initfail(ioc);
 		break;
 
+	case IOC_E_HWFAILED:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+		break;
+
 	case IOC_E_DISABLE:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
@@ -343,6 +352,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
 	case IOC_E_FWRSP_GETATTR:
 		del_timer(&ioc->ioc_timer);
 		bfa_ioc_check_attr_wwns(ioc);
+		bfa_ioc_hb_monitor(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
 		break;
 
@@ -352,7 +362,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
 		/* fall through */
 	case IOC_E_TIMEOUT:
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 		if (event != IOC_E_PFFAILED)
 			bfa_iocpf_getattrfail(ioc);
 		break;
@@ -374,7 +384,7 @@ static void
 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
 {
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
-	bfa_ioc_hb_monitor(ioc);
+	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
 }
 
 static void
@@ -394,12 +404,13 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
 		bfa_ioc_hb_stop(ioc);
 		/* !!! fall through !!! */
 	case IOC_E_HBFAIL:
-		bfa_ioc_fail_notify(ioc);
 		if (ioc->iocpf.auto_recover)
 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
 		else
 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 
+		bfa_ioc_fail_notify(ioc);
+
 		if (event != IOC_E_PFFAILED)
 			bfa_iocpf_fail(ioc);
 		break;
@@ -435,6 +446,11 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
 		bfa_iocpf_fail(ioc);
 		break;
 
+	case IOC_E_HWFAILED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+		bfa_ioc_disable_comp(ioc);
+		break;
+
 	default:
 		bfa_sm_fault(event);
 	}
@@ -493,12 +509,14 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
 		 * Initialization retry failed.
 		 */
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 		if (event != IOC_E_PFFAILED)
 			bfa_iocpf_initfail(ioc);
 		break;
 
-	case IOC_E_INITFAILED:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+	case IOC_E_HWFAILED:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
 		break;
 
 	case IOC_E_ENABLE:
@@ -552,6 +570,36 @@ bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
 	}
 }
 
+static void
+bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
+{
+}
+
+/**
+ * IOC failure.
+ */
+static void
+bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
+{
+	switch (event) {
+
+	case IOC_E_ENABLE:
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		break;
+
+	case IOC_E_DISABLE:
+		ioc->cbfn->disable_cbfn(ioc->bfa);
+		break;
+
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		break;
+
+	default:
+		bfa_sm_fault(event);
+	}
+}
+
 /**
  * IOCPF State Machine
  */
@@ -562,7 +610,7 @@ bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
 static void
 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
 {
-	iocpf->retry_count = 0;
+	iocpf->fw_mismatch_notified = false;
 	iocpf->auto_recover = bfa_nw_auto_recover;
 }
 
@@ -607,7 +655,6 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
 	case IOCPF_E_SEMLOCKED:
 		if (bfa_ioc_firmware_lock(ioc)) {
 			if (bfa_ioc_sync_start(ioc)) {
-				iocpf->retry_count = 0;
 				bfa_ioc_sync_join(ioc);
 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
 			} else {
@@ -622,6 +669,11 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
 		}
 		break;
 
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_ioc_pf_hwfailed(ioc);
+		break;
+
 	case IOCPF_E_DISABLE:
 		bfa_ioc_hw_sem_get_cancel(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
@@ -645,10 +697,10 @@ static void
 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
 {
 	/* Call only the first time sm enters fwmismatch state. */
-	if (iocpf->retry_count == 0)
+	if (iocpf->fw_mismatch_notified == false)
 		bfa_ioc_pf_fwmismatch(iocpf->ioc);
 
-	iocpf->retry_count++;
+	iocpf->fw_mismatch_notified = true;
 	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
 		msecs_to_jiffies(BFA_IOC_TOV));
 }
@@ -711,6 +763,11 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
 		}
 		break;
 
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_ioc_pf_hwfailed(ioc);
+		break;
+
 	case IOCPF_E_DISABLE:
 		bfa_ioc_hw_sem_get_cancel(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -724,8 +781,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
 {
-	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
-		msecs_to_jiffies(BFA_IOC_TOV));
+	iocpf->poll_time = 0;
 	bfa_ioc_reset(iocpf->ioc, 0);
 }
 
@@ -740,19 +796,11 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_FWREADY:
-		del_timer(&ioc->iocpf_timer);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
 		break;
 
-	case IOCPF_E_INITFAIL:
-		del_timer(&ioc->iocpf_timer);
-		/*
-		 * !!! fall through !!!
-		 */
-
 	case IOCPF_E_TIMEOUT:
 		bfa_nw_ioc_hw_sem_release(ioc);
-		if (event == IOCPF_E_TIMEOUT)
-			bfa_ioc_pf_failed(ioc);
+		bfa_ioc_pf_failed(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 		break;
@@ -774,6 +822,10 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
 {
 	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
 		msecs_to_jiffies(BFA_IOC_TOV));
+	/**
+	 * Enable Interrupts before sending fw IOC ENABLE cmd.
+	 */
+	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
 	bfa_ioc_send_enable(iocpf->ioc);
 }
 
@@ -811,21 +863,11 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
 		break;
 
-	case IOCPF_E_FWREADY:
-		bfa_ioc_send_enable(ioc);
-		break;
-
 	default:
 		bfa_sm_fault(event);
 	}
 }
 
-static bool
-bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
-{
-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
-}
-
 static void
 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
 {
@@ -835,8 +877,6 @@ bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
 static void
 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
 {
-	struct bfa_ioc *ioc = iocpf->ioc;
-
 	switch (event) {
 	case IOCPF_E_DISABLE:
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
@@ -850,14 +890,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
 		break;
 
-	case IOCPF_E_FWREADY:
-		bfa_ioc_pf_failed(ioc);
-		if (bfa_nw_ioc_is_operational(ioc))
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
-		else
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
-		break;
-
 	default:
 		bfa_sm_fault(event);
 	}
@@ -881,7 +913,6 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_FWRSP_DISABLE:
-	case IOCPF_E_FWREADY:
 		del_timer(&ioc->iocpf_timer);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
@@ -926,6 +957,11 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 		break;
 
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_ioc_pf_hwfailed(ioc);
+		break;
+
 	case IOCPF_E_FAIL:
 		break;
 
@@ -951,7 +987,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_ENABLE:
-		iocpf->retry_count = 0;
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
 		break;
 
@@ -982,20 +1017,15 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
 		bfa_ioc_notify_fail(ioc);
-		bfa_ioc_sync_ack(ioc);
-		iocpf->retry_count++;
-		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
-			bfa_ioc_sync_leave(ioc);
-			bfa_nw_ioc_hw_sem_release(ioc);
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-		} else {
-			if (bfa_ioc_sync_complete(ioc))
-				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
-			else {
-				bfa_nw_ioc_hw_sem_release(ioc);
-				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-			}
-		}
+		bfa_ioc_sync_leave(ioc);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		bfa_nw_ioc_hw_sem_release(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+		break;
+
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_ioc_pf_hwfailed(ioc);
 		break;
 
 	case IOCPF_E_DISABLE:
@@ -1020,7 +1050,6 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
 {
-	bfa_ioc_pf_initfailed(iocpf->ioc);
 }
 
 /**
@@ -1071,11 +1100,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
-		iocpf->retry_count = 0;
 		bfa_ioc_sync_ack(ioc);
 		bfa_ioc_notify_fail(ioc);
 		if (!iocpf->auto_recover) {
 			bfa_ioc_sync_leave(ioc);
+			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
 			bfa_nw_ioc_hw_sem_release(ioc);
 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 		} else {
@@ -1088,6 +1117,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
 		}
 		break;
 
+	case IOCPF_E_SEM_ERROR:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_ioc_pf_hwfailed(ioc);
+		break;
+
 	case IOCPF_E_DISABLE:
 		bfa_ioc_hw_sem_get_cancel(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -1158,13 +1192,13 @@ bfa_nw_ioc_sem_get(void __iomem *sem_reg)
 
 	r32 = readl(sem_reg);
 
-	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
+	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
 		cnt++;
 		udelay(2);
 		r32 = readl(sem_reg);
 	}
 
-	if (r32 == 0)
+	if (!(r32 & 1))
 		return true;
 
 	BUG_ON(!(cnt < BFA_SEM_SPINCNT));
@@ -1210,7 +1244,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
 	 * will return 1. Semaphore is released by writing 1 to the register
 	 */
 	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
-	if (r32 == 0) {
+	if (r32 == ~0) {
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
+		return;
+	}
+	if (!(r32 & 1)) {
 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
 		return;
 	}
@@ -1331,7 +1369,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
 	int i;
 
 	drv_fwhdr = (struct bfi_ioc_image_hdr *)
-		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
 	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
 		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
@@ -1352,12 +1390,12 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
 
 	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
 	drv_fwhdr = (struct bfi_ioc_image_hdr *)
-		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
 	if (fwhdr.signature != drv_fwhdr->signature)
 		return false;
 
-	if (swab32(fwhdr.param) != boot_env)
+	if (swab32(fwhdr.bootenv) != boot_env)
 		return false;
 
 	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1388,11 +1426,11 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 
 	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
-	boot_env = BFI_BOOT_LOADER_OS;
-
 	if (force)
 		ioc_fwstate = BFI_IOC_UNINIT;
 
+	boot_env = BFI_FWBOOT_ENV_OS;
+
 	/**
 	 * check if firmware is valid
 	 */
@@ -1400,7 +1438,8 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 		false : bfa_ioc_fwver_valid(ioc, boot_env);
 
 	if (!fwvalid) {
-		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
+		bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
+		bfa_ioc_poll_fwinit(ioc);
 		return;
 	}
 
@@ -1409,7 +1448,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 	 * just wait for an initialization completion interrupt.
 	 */
 	if (ioc_fwstate == BFI_IOC_INITING) {
-		ioc->cbfn->reset_cbfn(ioc->bfa);
+		bfa_ioc_poll_fwinit(ioc);
 		return;
 	}
 
@@ -1423,7 +1462,6 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 	 * be flushed. Otherwise MSI-X interrupts are not delivered.
 	 */
 	bfa_ioc_msgflush(ioc);
-	ioc->cbfn->reset_cbfn(ioc->bfa);
 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
 	return;
 }
@@ -1431,7 +1469,8 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 	/**
 	 * Initialize the h/w for any other states.
 	 */
-	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
+	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
+	bfa_ioc_poll_fwinit(ioc);
 }
 
 void
@@ -1475,7 +1514,7 @@ bfa_ioc_send_enable(struct bfa_ioc *ioc)
 
 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 		    bfa_ioc_portid(ioc));
-	enable_req.ioc_class = ioc->ioc_mc;
+	enable_req.clscode = htons(ioc->clscode);
 	do_gettimeofday(&tv);
 	enable_req.tv_sec = ntohl(tv.tv_sec);
 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
@@ -1548,22 +1587,23 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
 	u32 loff = 0;
 	u32 chunkno = 0;
 	u32 i;
+	u32 asicmode;
 
 	/**
 	 * Initialize LMEM first before code download
 	 */
 	bfa_ioc_lmem_init(ioc);
 
-	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 
 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
-	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
-			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
 		}
 
@@ -1590,12 +1630,16 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
 			ioc->ioc_regs.host_page_num_fn);
 
 	/*
-	 * Set boot type and boot param at the end.
+	 * Set boot type, env and device mode at the end.
 	 */
+	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
+					ioc->port0_mode, ioc->port1_mode);
+	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
+			+ BFI_FWBOOT_DEVMODE_OFF));
 	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
-			+ (BFI_BOOT_TYPE_OFF)));
+			+ (BFI_FWBOOT_TYPE_OFF)));
 	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
-			+ (BFI_BOOT_LOADER_OFF)));
+			+ (BFI_FWBOOT_ENV_OFF)));
 }
 
 static void
@@ -1605,6 +1649,20 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
 }
 
 /**
+ * BFA ioc enable reply by firmware
+ */
+static void
+bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
+			u8 cap_bm)
+{
+	struct bfa_iocpf *iocpf = &ioc->iocpf;
+
+	ioc->port_mode = ioc->port_mode_cfg = port_mode;
+	ioc->ad_cap_bm = cap_bm;
+	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+}
+
+/**
  * @brief
  * Update BFA configuration from firmware configuration.
  */
@@ -1644,7 +1702,9 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
 {
 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
 	struct bfa_mbox_cmd *cmd;
-	u32 stat;
+	bfa_mbox_cmd_cbfn_t cbfn;
+	void *cbarg;
+	u32 stat;
 
 	/**
 	 * If no command pending, do nothing
@@ -1664,6 +1724,16 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
 	 */
 	bfa_q_deq(&mod->cmd_q, &cmd);
 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+
+	/**
+	 * Give a callback to the client, indicating that the command is sent
+	 */
+	if (cmd->cbfn) {
+		cbfn = cmd->cbfn;
+		cbarg = cmd->cbarg;
+		cmd->cbfn = NULL;
+		cbfn(cbarg);
+	}
 }
 
 /**
@@ -1702,15 +1772,15 @@ bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
 }
 
 static void
-bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
+bfa_ioc_pf_failed(struct bfa_ioc *ioc)
 {
-	bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
+	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
 }
 
 static void
-bfa_ioc_pf_failed(struct bfa_ioc *ioc)
+bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
 {
-	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
 }
 
 static void
@@ -1749,10 +1819,9 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
  * as the entry vector.
  */
 static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
+bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
+		u32 boot_env)
 {
-	void __iomem *rb;
-
 	bfa_ioc_stats(ioc, ioc_boots);
 
 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
@@ -1761,22 +1830,16 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
 	/**
 	 * Initialize IOC state of all functions on a chip reset.
 	 */
-	rb = ioc->pcidev.pci_bar_kva;
-	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
-		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
-		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
+	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
+		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
 	} else {
-		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
-		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
+		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
 	}
 
 	bfa_ioc_msgflush(ioc);
 	bfa_ioc_download_fw(ioc, boot_type, boot_env);
-
-	/**
-	 * Enable interrupts just before starting LPU
-	 */
-	ioc->cbfn->reset_cbfn(ioc->bfa);
 	bfa_ioc_lpu_start(ioc);
 }
 
@@ -1789,13 +1852,17 @@ bfa_nw_ioc_auto_recover(bool auto_recover)
 	bfa_nw_auto_recover = auto_recover;
 }
 
-static void
+static bool
 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
 {
 	u32 *msgp = mbmsg;
 	u32 r32;
 	int i;
 
+	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+	if ((r32 & 1) == 0)
+		return false;
+
 	/**
 	 * read the MBOX msg
 	 */
@@ -1811,6 +1878,8 @@ bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
 	 */
 	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
 	readl(ioc->ioc_regs.lpu_mbox_cmd);
+
+	return true;
 }
 
 static void
@@ -1827,12 +1896,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
 	case BFI_IOC_I2H_HBEAT:
 		break;
 
-	case BFI_IOC_I2H_READY_EVENT:
-		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
-		break;
-
 	case BFI_IOC_I2H_ENABLE_REPLY:
-		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+		bfa_ioc_enable_reply(ioc,
+			(enum bfa_mode)msg->fw_event.port_mode,
+			msg->fw_event.cap_bm);
 		break;
 
 	case BFI_IOC_I2H_DISABLE_REPLY:
@@ -1878,6 +1945,9 @@ void
 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
 {
 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
+
+	/* Done with detach, empty the notify_q. */
+	INIT_LIST_HEAD(&ioc->notify_q);
 }
 
 /**
@@ -1887,12 +1957,29 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
  */
 void
 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
-		 enum bfi_mclass mc)
+		 enum bfi_pcifn_class clscode)
 {
-	ioc->ioc_mc	= mc;
+	ioc->clscode	= clscode;
 	ioc->pcidev	= *pcidev;
-	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
-	ioc->cna	= ioc->ctdev && !ioc->fcmode;
+
+	/**
+	 * Initialize IOC and device personality
+	 */
+	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
+	ioc->asic_mode  = BFI_ASIC_MODE_FC;
+
+	switch (pcidev->device_id) {
+	case PCI_DEVICE_ID_BROCADE_CT:
+		ioc->asic_gen = BFI_ASIC_GEN_CT;
+		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
+		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
+		ioc->ad_cap_bm = BFA_CM_CNA;
+		break;
+
+	default:
+		BUG_ON(1);
+	}
 
 	bfa_nw_ioc_set_ct_hwif(ioc);
 
@@ -2013,21 +2100,28 @@ bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
 	struct bfi_mbmsg m;
 	int mc;
 
-	bfa_ioc_msgget(ioc, &m);
+	if (bfa_ioc_msgget(ioc, &m)) {
+		/**
+		 * Treat IOC message class as special.
+		 */
+		mc = m.mh.msg_class;
+		if (mc == BFI_MC_IOC) {
+			bfa_ioc_isr(ioc, &m);
+			return;
+		}
 
-	/**
-	 * Treat IOC message class as special.
-	 */
-	mc = m.mh.msg_class;
-	if (mc == BFI_MC_IOC) {
-		bfa_ioc_isr(ioc, &m);
-		return;
-	}
+		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+			return;
+
+		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+	}
 
-	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
-		return;
+	bfa_ioc_lpu_read_stat(ioc);
 
-	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+	/**
+	 * Try to send pending mailbox commands
+	 */
+	bfa_ioc_mbox_poll(ioc);
 }
 
 void
@@ -2099,24 +2193,18 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
 	ad_attr->asic_rev = ioc_attr->asic_rev;
 
 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
-
-	ad_attr->cna_capable = ioc->cna;
-	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
 }
 
 static enum bfa_ioc_type
 bfa_ioc_get_type(struct bfa_ioc *ioc)
 {
-	if (!ioc->ctdev || ioc->fcmode)
-		return BFA_IOC_TYPE_FC;
-	else if (ioc->ioc_mc == BFI_MC_IOCFC)
-		return BFA_IOC_TYPE_FCoE;
-	else if (ioc->ioc_mc == BFI_MC_LL)
-		return BFA_IOC_TYPE_LL;
-	else {
-		BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
+	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
 		return BFA_IOC_TYPE_LL;
-	}
+
+	BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
+
+	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
+		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
 }
 
 static void
@@ -2228,6 +2316,10 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
 
 	ioc_attr->state = bfa_ioc_get_state(ioc);
 	ioc_attr->port_id = ioc->port_id;
+	ioc_attr->port_mode = ioc->port_mode;
+
+	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
+	ioc_attr->cap_bm = ioc->ad_cap_bm;
 
 	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
 
@@ -2317,8 +2409,14 @@ void
 bfa_nw_iocpf_timeout(void *ioc_arg)
 {
 	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+	enum bfa_iocpf_state iocpf_st;
+
+	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
 
-	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
+	if (iocpf_st == BFA_IOCPF_HWINIT)
+		bfa_ioc_poll_fwinit(ioc);
+	else
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
 }
 
 void
@@ -2328,3 +2426,22 @@ bfa_nw_iocpf_sem_timeout(void *ioc_arg)
 
 	bfa_ioc_hw_sem_get(ioc);
 }
+
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
+{
+	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	if (fwstate == BFI_IOC_DISABLED) {
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+		return;
+	}
+
+	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
+		bfa_nw_iocpf_timeout(ioc);
+	} else {
+		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+		mod_timer(&ioc->iocpf_timer, jiffies +
+			msecs_to_jiffies(BFA_IOC_POLL_TOV));
+	}
+}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 33ba5f40ca37..7514c722ebc3 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -27,6 +27,7 @@
 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
 #define BFA_IOC_HB_TOV		500	/* msecs */
 #define BFA_IOC_HWINIT_MAX	5
+#define BFA_IOC_POLL_TOV	200	/* msecs */
 
 /**
  * PCI device information required by IOC
@@ -169,8 +170,9 @@ struct bfa_ioc_hbfail_notify {
 struct bfa_iocpf {
 	bfa_fsm_t		fsm;
 	struct bfa_ioc		*ioc;
-	u32			retry_count;
+	bool			fw_mismatch_notified;
 	bool			auto_recover;
+	u32			poll_time;
 };
 
 struct bfa_ioc {
@@ -186,12 +188,10 @@ struct bfa_ioc {
 	void			*dbg_fwsave;
 	int			dbg_fwsave_len;
 	bool			dbg_fwsave_once;
-	enum bfi_mclass		ioc_mc;
+	enum bfi_pcifn_class	clscode;
 	struct bfa_ioc_regs	ioc_regs;
 	struct bfa_ioc_drv_stats stats;
 	bool			fcmode;
-	bool			ctdev;
-	bool			cna;
 	bool			pllinit;
 	bool			stats_busy;	/*!< outstanding stats */
 	u8			port_id;
@@ -202,10 +202,18 @@ struct bfa_ioc {
 	struct bfa_ioc_mbox_mod	mbox_mod;
 	struct bfa_ioc_hwif	*ioc_hwif;
 	struct bfa_iocpf	iocpf;
+	enum bfi_asic_gen	asic_gen;
+	enum bfi_asic_mode	asic_mode;
+	enum bfi_port_mode	port0_mode;
+	enum bfi_port_mode	port1_mode;
+	enum bfa_mode		port_mode;
+	u8			ad_cap_bm;	/*!< adapter cap bit mask */
+	u8			port_mode_cfg;	/*!< config port mode */
 };
 
 struct bfa_ioc_hwif {
-	enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
+	enum bfa_status (*ioc_pll_init) (void __iomem *rb,
+						enum bfi_asic_mode m);
 	bool		(*ioc_firmware_lock)	(struct bfa_ioc *ioc);
 	void		(*ioc_firmware_unlock)	(struct bfa_ioc *ioc);
 	void		(*ioc_reg_init)	(struct bfa_ioc *ioc);
@@ -219,12 +227,14 @@ struct bfa_ioc_hwif {
 	void		(*ioc_sync_leave)	(struct bfa_ioc *ioc);
 	void		(*ioc_sync_ack)		(struct bfa_ioc *ioc);
 	bool		(*ioc_sync_complete)	(struct bfa_ioc *ioc);
+	bool		(*ioc_lpu_read_stat)	(struct bfa_ioc *ioc);
 };
 
 #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
 #define bfa_ioc_devid(__ioc)		((__ioc)->pcidev.device_id)
 #define bfa_ioc_bar0(__ioc)		((__ioc)->pcidev.pci_bar_kva)
 #define bfa_ioc_portid(__ioc)		((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc)		((__ioc)->asic_gen)
 #define bfa_ioc_fetch_stats(__ioc, __stats) \
 		(((__stats)->drv_stats) = (__ioc)->stats)
 #define bfa_ioc_clr_stats(__ioc)	\
@@ -245,7 +255,8 @@ struct bfa_ioc_hwif {
 	(((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) :	\
 	BFI_IMAGE_CB_FC)
 #define	BFA_IOC_FW_SMEM_SIZE(__ioc)					\
-	(((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+	((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB)			\
+	 ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
 #define BFA_IOC_FLASH_CHUNK_NO(off)	(off / BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)	(off % BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
@@ -266,13 +277,18 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
 
 #define bfa_ioc_pll_init_asic(__ioc) \
 	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
-			   (__ioc)->fcmode))
+			   (__ioc)->asic_mode))
 
 #define	bfa_ioc_isr_mode_set(__ioc, __msix)			\
 			((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
 #define	bfa_ioc_ownership_reset(__ioc)				\
 			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
 
+#define bfa_ioc_lpu_read_stat(__ioc) do {			\
+		if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)	\
+			((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
+} while (0)
+
 void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
 
 void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
@@ -280,7 +296,7 @@ void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
 void bfa_nw_ioc_auto_recover(bool auto_recover);
 void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
 void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
-		 enum bfi_mclass mc);
+		 enum bfi_pcifn_class clscode);
 u32 bfa_nw_ioc_meminfo(void);
 void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
 void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
@@ -311,7 +327,7 @@ void bfa_nw_iocpf_sem_timeout(void *ioc);
 /*
  * F/W Image Size & Chunk
  */
-u32 *bfa_cb_image_get_chunk(int type, u32 off);
-u32 bfa_cb_image_get_size(int type);
+u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
+u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
 
 #endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 209f1f320343..b4429bc67c34 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -46,7 +46,8 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
-static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
+				enum bfi_asic_mode asic_mode);
 
 static struct bfa_ioc_hwif nw_hwif_ct;
 
@@ -92,7 +93,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
 	/**
 	 * If bios boot (flash based) -- do not increment usage count
 	 */
-	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
 						BFA_IOC_FWIMG_MINSZ)
 		return true;
 
@@ -142,7 +143,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
 	/**
 	 * If bios boot (flash based) -- do not decrement usage count
 	 */
-	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
 						BFA_IOC_FWIMG_MINSZ)
 		return;
 
@@ -165,22 +166,17 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
 static void
 bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
 {
-	if (ioc->cna) {
-		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
-		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
-		/* Wait for halt to take effect */
-		readl(ioc->ioc_regs.ll_halt);
-		readl(ioc->ioc_regs.alt_ll_halt);
-	} else {
-		writel(~0U, ioc->ioc_regs.err_set);
-		readl(ioc->ioc_regs.err_set);
-	}
+	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
+	/* Wait for halt to take effect */
+	readl(ioc->ioc_regs.ll_halt);
+	readl(ioc->ioc_regs.alt_ll_halt);
 }
 
 /**
  * Host to LPU mailbox message addresses
  */
-static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
 	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
 	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
 	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
@@ -215,9 +211,9 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
 
 	rb = bfa_ioc_bar0(ioc);
 
-	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
-	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
-	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
 
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
@@ -323,11 +319,9 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
 static void
 bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
 {
-	if (ioc->cna) {
-		bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-		writel(0, ioc->ioc_regs.ioc_usage_reg);
-		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
-	}
+	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(0, ioc->ioc_regs.ioc_usage_reg);
+	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 
 	/*
 	 * Read the hw sem reg to make sure that it is locked
@@ -436,9 +430,10 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
 }
 
 static enum bfa_status
-bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
 {
 	u32 pll_sclk, pll_fclk, r32;
+	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);
 
 	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
 		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 6a53183e411e..978e1bc12dc1 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -43,17 +43,21 @@ struct bfi_mhdr {
 	u8 msg_id;		/*!< msg opcode within the class */
 	union {
 		struct {
-			u8 rsvd;
-			u8 lpu_id;	/*!< msg destination */
+			u8 qid;
+			u8 fn_lpu;	/*!< msg destination */
 		} h2i;
 		u16 i2htok;	/*!< token in msgs to host */
 	} mtag;
 };
 
-#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do {		\
+#define bfi_fn_lpu(__fn, __lpu)	((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh)	((_mh)->mtag.h2i.fn_lpu >> 1)
+#define bfi_mhdr_2_qid(_mh)	((_mh)->mtag.h2i.qid)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do {	\
 	(_mh).msg_class	= (_mc);			\
 	(_mh).msg_id	= (_op);			\
-	(_mh).mtag.h2i.lpu_id = (_lpuid);		\
+	(_mh).mtag.h2i.fn_lpu = (_fn_lpu);		\
 } while (0)
 
 #define bfi_i2h_set(_mh, _mc, _op, _i2htok) do {	\
@@ -149,6 +153,14 @@ struct bfi_mbmsg {
 };
 
 /**
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+	BFI_PCIFN_CLASS_FC	= 0x0c04,
+	BFI_PCIFN_CLASS_ETH	= 0x0200,
+};
+
+/**
  * Message Classes
  */
 enum bfi_mclass {
@@ -203,6 +215,21 @@ enum bfi_mclass {
  *----------------------------------------------------------------------
  */
 
+/**
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+	BFI_ASIC_GEN_CB		= 1,
+	BFI_ASIC_GEN_CT		= 2,
+};
+
+enum bfi_asic_mode {
+	BFI_ASIC_MODE_FC	= 1,	/* FC up to 8G speed */
+	BFI_ASIC_MODE_FC16	= 2,	/* FC up to 16G speed */
+	BFI_ASIC_MODE_ETH	= 3,	/* Ethernet ports */
+	BFI_ASIC_MODE_COMBO	= 4,	/* FC 16G and Ethernet 10G port */
+};
+
 enum bfi_ioc_h2i_msgs {
 	BFI_IOC_H2I_ENABLE_REQ		= 1,
 	BFI_IOC_H2I_DISABLE_REQ		= 2,
@@ -215,8 +242,7 @@ enum bfi_ioc_i2h_msgs {
 	BFI_IOC_I2H_ENABLE_REPLY	= BFA_I2HM(1),
 	BFI_IOC_I2H_DISABLE_REPLY	= BFA_I2HM(2),
 	BFI_IOC_I2H_GETATTR_REPLY	= BFA_I2HM(3),
-	BFI_IOC_I2H_READY_EVENT		= BFA_I2HM(4),
-	BFI_IOC_I2H_HBEAT		= BFA_I2HM(5),
+	BFI_IOC_I2H_HBEAT		= BFA_I2HM(4),
 };
 
 /**
@@ -231,7 +257,8 @@ struct bfi_ioc_attr {
 	u64 mfg_pwwn;		/*!< Mfg port wwn */
 	u64 mfg_nwwn;		/*!< Mfg node wwn */
 	mac_t mfg_mac;		/*!< Mfg mac */
-	u16 rsvd_a;
+	u8 port_mode;		/* enum bfi_port_mode */
+	u8 rsvd_a;
 	u64 pwwn;
 	u64 nwwn;
 	mac_t mac;		/*!< PBC or Mfg mac */
@@ -284,19 +311,36 @@ struct bfi_ioc_getattr_reply {
 #define BFI_IOC_MD5SUM_SZ	4
 struct bfi_ioc_image_hdr {
 	u32	signature;	/*!< constant signature */
-	u32	rsvd_a;
+	u8	asic_gen;	/*!< asic generation */
+	u8	asic_mode;
+	u8	port0_mode;	/*!< device mode for port 0 */
+	u8	port1_mode;	/*!< device mode for port 1 */
 	u32	exec;		/*!< exec vector */
-	u32	param;		/*!< parameters */
+	u32	bootenv;	/*!< firmware boot env */
 	u32	rsvd_b[4];
 	u32	md5sum[BFI_IOC_MD5SUM_SZ];
 };
 
+#define BFI_FWBOOT_DEVMODE_OFF		4
+#define BFI_FWBOOT_TYPE_OFF		8
+#define BFI_FWBOOT_ENV_OFF		12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+	(((u32)(__asic_gen)) << 24 |	\
+	((u32)(__asic_mode)) << 16 |	\
+	((u32)(__p0_mode)) << 8 |	\
+	((u32)(__p1_mode)))
+
 enum bfi_fwboot_type {
 	BFI_FWBOOT_TYPE_NORMAL	= 0,
 	BFI_FWBOOT_TYPE_FLASH	= 1,
 	BFI_FWBOOT_TYPE_MEMTEST	= 2,
 };
 
+enum bfi_port_mode {
+	BFI_PORT_MODE_FC	= 1,
+	BFI_PORT_MODE_ETH	= 2,
+};
+
 /**
  * BFI_IOC_I2H_READY_EVENT message
  */
@@ -362,8 +406,8 @@ enum {
  */
 struct bfi_ioc_ctrl_req {
 	struct bfi_mhdr mh;
-	u8 ioc_class;
-	u8 rsvd[3];
+	u16 clscode;
+	u16 rsvd;
 	u32 tv_sec;
 };
 
@@ -371,9 +415,11 @@ struct bfi_ioc_ctrl_req {
  * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
  */
 struct bfi_ioc_ctrl_reply {
 	struct bfi_mhdr mh;		/*!< Common msg header */
 	u8 status;			/*!< enable/disable status */
-	u8 rsvd[3];
+	u8 port_mode;			/*!< enum bfa_mode */
+	u8 cap_bm;			/*!< capability bit mask */
+	u8 rsvd;
 };
 
 #define BFI_IOC_MSGSZ 8
@@ -393,7 +439,7 @@ union bfi_ioc_h2i_msg_u {
  */
 union bfi_ioc_i2h_msg_u {
 	struct bfi_mhdr mh;
-	struct bfi_ioc_rdy_event rdy_event;
+	struct bfi_ioc_ctrl_reply fw_event;
 	u32 mboxmsg[BFI_IOC_MSGSZ];
 };
 
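A quick sanity sketch of the new header packing, illustrative only and not part of the patch: bfi_fn_lpu() folds the PCI function id into the upper bits and the LPU id into bit 0 of fn_lpu, and BFI_FWBOOT_DEVMODE() packs asic generation/mode and both port modes into a single 32-bit boot parameter. BFI_MC_IOC is taken from enum bfi_mclass, whose members are elided above.

static void example_pack(void)
{
	struct bfi_mhdr mh;
	u32 devmode;

	/* destination: PCI function 2, LPU 1 -> fn_lpu = (2 << 1) | 1 = 5 */
	bfi_h2i_set(mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, bfi_fn_lpu(2, 1));

	/* CT ASIC in ethernet mode, both ports ethernet:
	 * (2 << 24) | (3 << 16) | (2 << 8) | 2 = 0x02030202
	 */
	devmode = BFI_FWBOOT_DEVMODE(BFI_ASIC_GEN_CT, BFI_ASIC_MODE_ETH,
				BFI_PORT_MODE_ETH, BFI_PORT_MODE_ETH);
	(void)devmode;	/* sketch: value would be written at BFI_FWBOOT_DEVMODE_OFF */
}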
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 21e9155d6e56..f9781a3d559b 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -40,7 +40,7 @@ do { \
 	(_qe)->cbarg = (_cbarg);				\
 } while (0)
 
-#define	bna_is_small_rxq(rcb) ((rcb)->id == 1)
+#define	bna_is_small_rxq(_id) ((_id) & 0x1)
 
 #define BNA_MAC_IS_EQUAL(_mac1, _mac2)				\
 	(!memcmp((_mac1), (_mac2), sizeof(mac_t)))
@@ -214,38 +214,59 @@ do { \
 	}							\
 } while (0)
 
-#define call_rxf_stop_cbfn(rxf, status)				\
+#define call_rxf_stop_cbfn(rxf)					\
+do {								\
 	if ((rxf)->stop_cbfn) {					\
-		(*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status)); \
+		void (*cbfn)(struct bna_rx *);			\
+		struct bna_rx *cbarg;				\
+		cbfn = (rxf)->stop_cbfn;			\
+		cbarg = (rxf)->stop_cbarg;			\
 		(rxf)->stop_cbfn = NULL;			\
 		(rxf)->stop_cbarg = NULL;			\
-	}
+		cbfn(cbarg);					\
+	}							\
+} while (0)
 
-#define call_rxf_start_cbfn(rxf, status)			\
+#define call_rxf_start_cbfn(rxf)				\
+do {								\
 	if ((rxf)->start_cbfn) {				\
-		(*(rxf)->start_cbfn)((rxf)->start_cbarg, (status)); \
+		void (*cbfn)(struct bna_rx *);			\
+		struct bna_rx *cbarg;				\
+		cbfn = (rxf)->start_cbfn;			\
+		cbarg = (rxf)->start_cbarg;			\
 		(rxf)->start_cbfn = NULL;			\
 		(rxf)->start_cbarg = NULL;			\
-	}
+		cbfn(cbarg);					\
+	}							\
+} while (0)
 
-#define call_rxf_cam_fltr_cbfn(rxf, status)			\
+#define call_rxf_cam_fltr_cbfn(rxf)				\
+do {								\
 	if ((rxf)->cam_fltr_cbfn) {				\
-		(*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx,	\
-			(status));				\
+		void (*cbfn)(struct bnad *, struct bna_rx *);	\
+		struct bnad *cbarg;				\
+		cbfn = (rxf)->cam_fltr_cbfn;			\
+		cbarg = (rxf)->cam_fltr_cbarg;			\
 		(rxf)->cam_fltr_cbfn = NULL;			\
 		(rxf)->cam_fltr_cbarg = NULL;			\
-	}
+		cbfn(cbarg, rxf->rx);				\
+	}							\
+} while (0)
 
-#define call_rxf_pause_cbfn(rxf, status)			\
+#define call_rxf_pause_cbfn(rxf)				\
+do {								\
 	if ((rxf)->oper_state_cbfn) {				\
-		(*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
-			(status));				\
-		(rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED; \
+		void (*cbfn)(struct bnad *, struct bna_rx *);	\
+		struct bnad *cbarg;				\
+		cbfn = (rxf)->oper_state_cbfn;			\
+		cbarg = (rxf)->oper_state_cbarg;		\
 		(rxf)->oper_state_cbfn = NULL;			\
 		(rxf)->oper_state_cbarg = NULL;			\
-	}
+		cbfn(cbarg, rxf->rx);				\
+	}							\
+} while (0)
 
-#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
+#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
 
 #define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
 
@@ -331,6 +352,61 @@ do { \
 	}							\
 } while (0)
 
+#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)
+
+#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
+
+#define bna_tx_from_rid(_bna, _rid, _tx)			\
+do {								\
+	struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod;		\
+	struct bna_tx *__tx;					\
+	struct list_head *qe;					\
+	_tx = NULL;						\
+	list_for_each(qe, &__tx_mod->tx_active_q) {		\
+		__tx = (struct bna_tx *)qe;			\
+		if (__tx->rid == (_rid)) {			\
+			(_tx) = __tx;				\
+			break;					\
+		}						\
+	}							\
+} while (0)
+
+#define bna_rx_from_rid(_bna, _rid, _rx)			\
+do {								\
+	struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod;		\
+	struct bna_rx *__rx;					\
+	struct list_head *qe;					\
+	_rx = NULL;						\
+	list_for_each(qe, &__rx_mod->rx_active_q) {		\
+		__rx = (struct bna_rx *)qe;			\
+		if (__rx->rid == (_rid)) {			\
+			(_rx) = __rx;				\
+			break;					\
+		}						\
+	}							\
+} while (0)
+
+/**
+ *
+ * Inline functions
+ *
+ */
+
+static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
+{
+	struct bna_mac *mac = NULL;
+	struct list_head *qe;
+	list_for_each(qe, q) {
+		if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
+			mac = (struct bna_mac *)qe;
+			break;
+		}
+	}
+	return mac;
+}
+
+#define	bna_attr(_bna) (&(_bna)->ioceth.attr)
+
 /**
  *
  * Function prototypes
@@ -341,14 +417,22 @@ do { \
  * BNA
  */
 
+/* FW response handlers */
+void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
+
 /* APIs for BNAD */
 void bna_res_req(struct bna_res_info *res_info);
+void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
 void bna_init(struct bna *bna, struct bnad *bnad,
 		struct bfa_pcidev *pcidev,
 		struct bna_res_info *res_info);
+void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
 void bna_uninit(struct bna *bna);
+int bna_num_txq_set(struct bna *bna, int num_txq);
+int bna_num_rxp_set(struct bna *bna, int num_rxp);
 void bna_stats_get(struct bna *bna);
 void bna_get_perm_mac(struct bna *bna, u8 *mac);
+void bna_hw_stats_get(struct bna *bna);
 
 /* APIs for Rx */
 int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
@@ -360,6 +444,9 @@ void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
 struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
 void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
 			  struct bna_mac *mac);
+struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
+void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
+			  struct bna_mcam_handle *handle);
 struct bna_rit_segment *
 bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
 void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
@@ -409,6 +496,14 @@ void bna_port_cb_rx_stopped(struct bna_port *port,
 			enum bna_cb_status status);
 
 /**
+ * ETHPORT
+ */
+
+/* Callbacks for RX */
+void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
+void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
+
+/**
  * IB
  */
 
@@ -420,6 +515,12 @@ void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
 /**
  * TX MODULE AND TX
  */
+/* FW response handlers */
+void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
+			       struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
+			      struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);
 
 /* APIs for BNA */
 void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
@@ -427,7 +528,7 @@ void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
 void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
 int bna_tx_state_get(struct bna_tx *tx);
 
-/* APIs for PORT */
+/* APIs for ENET */
 void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
 void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
 void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
@@ -444,8 +545,8 @@ struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
 void bna_tx_destroy(struct bna_tx *tx);
 void bna_tx_enable(struct bna_tx *tx);
 void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
-		    void (*cbfn)(void *, struct bna_tx *,
-				 enum bna_cb_status));
+		    void (*cbfn)(void *, struct bna_tx *));
+void bna_tx_cleanup_complete(struct bna_tx *tx);
 void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
 
 /**
@@ -473,6 +574,15 @@ void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
 void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
 void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
 
+/* FW response handlers */
+void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
+			       struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
+			      struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
+void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
+			       struct bfi_msgq_mhdr *msghdr);
+
 /* APIs for BNA */
 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
 		     struct bna_res_info *res_info);
@@ -480,7 +590,7 @@ void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
 int bna_rx_state_get(struct bna_rx *rx);
 int bna_rxf_state_get(struct bna_rxf *rxf);
 
-/* APIs for PORT */
+/* APIs for ENET */
 void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
 void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
 void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
@@ -495,42 +605,84 @@ struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
 void bna_rx_destroy(struct bna_rx *rx);
 void bna_rx_enable(struct bna_rx *rx);
 void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
-		    void (*cbfn)(void *, struct bna_rx *,
-				 enum bna_cb_status));
+		    void (*cbfn)(void *, struct bna_rx *));
+void bna_rx_cleanup_complete(struct bna_rx *rx);
 void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
 void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
 void bna_rx_dim_update(struct bna_ccb *ccb);
 enum bna_cb_status
 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
-		 void (*cbfn)(struct bnad *, struct bna_rx *,
-			      enum bna_cb_status));
+		 void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
+		 void (*cbfn)(struct bnad *, struct bna_rx *));
+enum bna_cb_status
+bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
+		 void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
-		 void (*cbfn)(struct bnad *, struct bna_rx *,
-			      enum bna_cb_status));
+		 void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
-		     void (*cbfn)(struct bnad *, struct bna_rx *,
-				  enum bna_cb_status));
+		     void (*cbfn)(struct bnad *, struct bna_rx *));
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
 		enum bna_rxmode bitmask,
-		void (*cbfn)(struct bnad *, struct bna_rx *,
-			     enum bna_cb_status));
+		void (*cbfn)(struct bnad *, struct bna_rx *));
 void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
-		       void (*cbfn)(struct bnad *, struct bna_rx *,
-				    enum bna_cb_status));
+void bna_rx_hds_enable(struct bna_rx *rx, struct bna_hds_config *hds_config,
+		       void (*cbfn)(struct bnad *, struct bna_rx *));
 void bna_rx_hds_disable(struct bna_rx *rx,
-			void (*cbfn)(struct bnad *, struct bna_rx *,
-				     enum bna_cb_status));
+			void (*cbfn)(struct bnad *, struct bna_rx *));
+
+/**
+ * ENET
+ */
+
+/* API for RX */
+int bna_enet_mtu_get(struct bna_enet *enet);
+
+/* Callbacks for TX, RX */
+void bna_enet_cb_tx_stopped(struct bna_enet *enet);
+void bna_enet_cb_rx_stopped(struct bna_enet *enet);
+
+/* API for BNAD */
+void bna_enet_enable(struct bna_enet *enet);
+void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
+		      void (*cbfn)(void *));
+void bna_enet_pause_config(struct bna_enet *enet,
+			   struct bna_pause_config *pause_config,
+			   void (*cbfn)(struct bnad *));
+void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
+		      void (*cbfn)(struct bnad *));
+void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
+
+/**
+ * IOCETH
+ */
+
+/* APIs for BNAD */
+void bna_ioceth_enable(struct bna_ioceth *ioceth);
+void bna_ioceth_disable(struct bna_ioceth *ioceth,
+			enum bna_cleanup_type type);
 
 /**
  * BNAD
  */
 
+/* Callbacks for ENET */
+void bnad_cb_ethport_link_status(struct bnad *bnad,
+				 enum bna_link_status status);
+
+/* Callbacks for IOCETH */
+void bnad_cb_ioceth_ready(struct bnad *bnad);
+void bnad_cb_ioceth_failed(struct bnad *bnad);
+void bnad_cb_ioceth_disabled(struct bnad *bnad);
+void bnad_cb_mbox_intr_enable(struct bnad *bnad);
+void bnad_cb_mbox_intr_disable(struct bnad *bnad);
+
 /* Callbacks for BNA */
 void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
 		       struct bna_stats *stats);
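The reworked call_rxf_*_cbfn macros above drop the status argument and adopt a snapshot-then-clear pattern. Written out as a plain function, illustrative only, the pattern looks like this; the ordering matters because the invoked callback may immediately re-arm rxf->stop_cbfn for the next state transition:

static void rxf_fire_stop_cbfn(struct bna_rxf *rxf)
{
	void (*cbfn)(struct bna_rx *);
	struct bna_rx *cbarg;

	if (!rxf->stop_cbfn)
		return;
	cbfn = rxf->stop_cbfn;		/* snapshot before clearing */
	cbarg = rxf->stop_cbarg;
	rxf->stop_cbfn = NULL;		/* state is clean before the call */
	rxf->stop_cbarg = NULL;
	cbfn(cbarg);			/* safe even if it re-arms the hook */
}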
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index 2f89cb235248..655eb140bf94 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -19,8 +19,10 @@
 #define __BNA_TYPES_H__
 
 #include "cna.h"
-#include "bna_hw.h"
+#include "bna_hw_defs.h"
 #include "bfa_cee.h"
+#include "bfi_enet.h"
+#include "bfa_msgq.h"
 
 /**
  *
@@ -28,6 +30,7 @@
  *
  */
 
+struct bna_mcam_handle;
 struct bna_txq;
 struct bna_tx;
 struct bna_rxq;
@@ -35,6 +38,7 @@ struct bna_cq;
 struct bna_rx;
 struct bna_rxf;
 struct bna_port;
+struct bna_enet;
 struct bna;
 struct bnad;
 
@@ -104,13 +108,26 @@ enum bna_res_req_type {
 	BNA_RES_T_MAX
 };
 
+enum bna_mod_res_req_type {
+	BNA_MOD_RES_MEM_T_TX_ARRAY	= 0,
+	BNA_MOD_RES_MEM_T_TXQ_ARRAY	= 1,
+	BNA_MOD_RES_MEM_T_RX_ARRAY	= 2,
+	BNA_MOD_RES_MEM_T_RXP_ARRAY	= 3,
+	BNA_MOD_RES_MEM_T_RXQ_ARRAY	= 4,
+	BNA_MOD_RES_MEM_T_UCMAC_ARRAY	= 5,
+	BNA_MOD_RES_MEM_T_MCMAC_ARRAY	= 6,
+	BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7,
+	BNA_MOD_RES_T_MAX
+};
+
 enum bna_tx_res_req_type {
 	BNA_TX_RES_MEM_T_TCB	= 0,
 	BNA_TX_RES_MEM_T_UNMAPQ	= 1,
 	BNA_TX_RES_MEM_T_QPT	= 2,
 	BNA_TX_RES_MEM_T_SWQPT	= 3,
 	BNA_TX_RES_MEM_T_PAGE	= 4,
-	BNA_TX_RES_INTR_T_TXCMPL = 5,
+	BNA_TX_RES_MEM_T_IBIDX	= 5,
+	BNA_TX_RES_INTR_T_TXCMPL = 6,
 	BNA_TX_RES_T_MAX,
 };
 
@@ -127,8 +144,10 @@ enum bna_rx_mem_type {
 	BNA_RX_RES_MEM_T_DSWQPT	= 9,	/* RX s/w QPT */
 	BNA_RX_RES_MEM_T_DPAGE	= 10,	/* RX s/w QPT */
 	BNA_RX_RES_MEM_T_HPAGE	= 11,	/* RX s/w QPT */
-	BNA_RX_RES_T_INTR	= 12,	/* Rx interrupts */
-	BNA_RX_RES_T_MAX	= 13
+	BNA_RX_RES_MEM_T_IBIDX	= 12,
+	BNA_RX_RES_MEM_T_RIT	= 13,
+	BNA_RX_RES_T_INTR	= 14,	/* Rx interrupts */
+	BNA_RX_RES_T_MAX	= 15
 };
 
 enum bna_mbox_state {
@@ -142,14 +161,15 @@ enum bna_tx_type {
 };
 
 enum bna_tx_flags {
-	BNA_TX_F_PORT_STARTED	= 1,
+	BNA_TX_F_ENET_STARTED	= 1,
 	BNA_TX_F_ENABLED	= 2,
-	BNA_TX_F_PRIO_LOCK	= 4,
+	BNA_TX_F_PRIO_CHANGED	= 4,
+	BNA_TX_F_BW_UPDATED	= 8,
 };
 
 enum bna_tx_mod_flags {
-	BNA_TX_MOD_F_PORT_STARTED	= 1,
-	BNA_TX_MOD_F_PORT_LOOPBACK	= 2,
+	BNA_TX_MOD_F_ENET_STARTED	= 1,
+	BNA_TX_MOD_F_ENET_LOOPBACK	= 2,
 };
 
 enum bna_rx_type {
@@ -165,16 +185,19 @@ enum bna_rxp_type {
 
 enum bna_rxmode {
 	BNA_RXMODE_PROMISC	= 1,
-	BNA_RXMODE_ALLMULTI	= 2
+	BNA_RXMODE_DEFAULT	= 2,
+	BNA_RXMODE_ALLMULTI	= 4
 };
 
 enum bna_rx_event {
 	RX_E_START		= 1,
 	RX_E_STOP		= 2,
 	RX_E_FAIL		= 3,
-	RX_E_RXF_STARTED	= 4,
-	RX_E_RXF_STOPPED	= 5,
-	RX_E_RXQ_STOPPED	= 6,
+	RX_E_STARTED		= 4,
+	RX_E_STOPPED		= 5,
+	RX_E_RXF_STARTED	= 6,
+	RX_E_RXF_STOPPED	= 7,
+	RX_E_CLEANUP_DONE	= 8,
 };
 
 enum bna_rx_state {
@@ -186,14 +209,13 @@ enum bna_rx_state {
 };
 
 enum bna_rx_flags {
-	BNA_RX_F_ENABLE		= 0x01,		/* bnad enabled rxf */
-	BNA_RX_F_PORT_ENABLED	= 0x02,		/* Port object is enabled */
-	BNA_RX_F_PORT_FAILED	= 0x04,		/* Port in failed state */
+	BNA_RX_F_ENET_STARTED	= 1,
+	BNA_RX_F_ENABLED	= 2,
 };
 
 enum bna_rx_mod_flags {
-	BNA_RX_MOD_F_PORT_STARTED	= 1,
-	BNA_RX_MOD_F_PORT_LOOPBACK	= 2,
+	BNA_RX_MOD_F_ENET_STARTED	= 1,
+	BNA_RX_MOD_F_ENET_LOOPBACK	= 2,
 };
 
 enum bna_rxf_oper_state {
@@ -202,25 +224,17 @@ enum bna_rxf_oper_state {
 };
 
 enum bna_rxf_flags {
-	BNA_RXF_FL_STOP_PENDING		= 0x01,
-	BNA_RXF_FL_FAILED		= 0x02,
-	BNA_RXF_FL_RSS_CONFIG_PENDING	= 0x04,
-	BNA_RXF_FL_OPERSTATE_CHANGED	= 0x08,
-	BNA_RXF_FL_RXF_ENABLED		= 0x10,
-	BNA_RXF_FL_VLAN_CONFIG_PENDING	= 0x20,
+	BNA_RXF_F_PAUSED		= 1,
 };
 
 enum bna_rxf_event {
 	RXF_E_START			= 1,
 	RXF_E_STOP			= 2,
 	RXF_E_FAIL			= 3,
-	RXF_E_CAM_FLTR_MOD		= 4,
-	RXF_E_STARTED			= 5,
-	RXF_E_STOPPED			= 6,
-	RXF_E_CAM_FLTR_RESP		= 7,
-	RXF_E_PAUSE			= 8,
-	RXF_E_RESUME			= 9,
-	RXF_E_STAT_CLEARED		= 10,
+	RXF_E_CONFIG			= 4,
+	RXF_E_PAUSE			= 5,
+	RXF_E_RESUME			= 6,
+	RXF_E_FW_RESP			= 7,
 };
 
 enum bna_rxf_state {
@@ -241,6 +255,12 @@ enum bna_port_type {
 	BNA_PORT_T_LOOPBACK_EXTERNAL	= 2,
 };
 
+enum bna_enet_type {
+	BNA_ENET_T_REGULAR		= 0,
+	BNA_ENET_T_LOOPBACK_INTERNAL	= 1,
+	BNA_ENET_T_LOOPBACK_EXTERNAL	= 2,
+};
+
 enum bna_link_status {
 	BNA_LINK_DOWN		= 0,
 	BNA_LINK_UP		= 1,
@@ -253,6 +273,12 @@ enum bna_llport_flags {
 	BNA_LLPORT_F_RX_STARTED		= 4
 };
 
+enum bna_ethport_flags {
+	BNA_ETHPORT_F_ADMIN_UP		= 1,
+	BNA_ETHPORT_F_PORT_ENABLED	= 2,
+	BNA_ETHPORT_F_RX_STARTED	= 4,
+};
+
 enum bna_port_flags {
 	BNA_PORT_F_DEVICE_READY	= 1,
 	BNA_PORT_F_ENABLED	= 2,
@@ -260,6 +286,23 @@ enum bna_port_flags {
 	BNA_PORT_F_MTU_CHANGED	= 8
 };
 
+enum bna_enet_flags {
+	BNA_ENET_F_IOCETH_READY		= 1,
+	BNA_ENET_F_ENABLED		= 2,
+	BNA_ENET_F_PAUSE_CHANGED	= 4,
+	BNA_ENET_F_MTU_CHANGED		= 8
+};
+
+enum bna_rss_flags {
+	BNA_RSS_F_RIT_PENDING		= 1,
+	BNA_RSS_F_CFG_PENDING		= 2,
+	BNA_RSS_F_STATUS_PENDING	= 4,
+};
+
+enum bna_mod_flags {
+	BNA_MOD_F_INIT_DONE		= 1,
+};
+
 enum bna_pkt_rates {
 	BNA_PKT_RATE_10K	= 10000,
 	BNA_PKT_RATE_20K	= 20000,
@@ -289,10 +332,17 @@ enum bna_dim_bias_types {
 	BNA_BIAS_T_MAX		= 2
 };
 
+#define BNA_MAX_NAME_SIZE	64
+struct bna_ident {
+	int			id;
+	char			name[BNA_MAX_NAME_SIZE];
+};
+
 struct bna_mac {
 	/* This should be the first one */
 	struct list_head qe;
 	u8 addr[ETH_ALEN];
+	struct bna_mcam_handle *handle;
 };
 
 struct bna_mem_descr {
@@ -338,23 +388,29 @@ struct bna_qpt {
 	u32	page_size;
 };
 
+struct bna_attr {
+	int			num_txq;
+	int			num_rxp;
+	int			num_ucmac;
+	int			num_mcmac;
+	int			max_rit_size;
+};
+
 /**
  *
- * Device
+ * IOCEth
  *
  */
 
-struct bna_device {
+struct bna_ioceth {
 	bfa_fsm_t fsm;
 	struct bfa_ioc ioc;
 
-	enum bna_intr_type intr_type;
-	int vector;
+	struct bna_attr attr;
+	struct bfa_msgq_cmd_entry msgq_cmd;
+	struct bfi_enet_attr_req attr_req;
 
-	void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
-	struct bnad *ready_cbarg;
-
-	void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
+	void (*stop_cbfn)(struct bnad *bnad);
 	struct bnad *stop_cbarg;
 
 	struct bna *bna;
@@ -447,6 +503,68 @@ struct bna_port {
 
 /**
  *
+ * Enet
+ *
+ */
+
+struct bna_enet {
+	bfa_fsm_t fsm;
+	enum bna_enet_flags flags;
+
+	enum bna_enet_type type;
+
+	struct bna_pause_config pause_config;
+	int mtu;
+
+	/* Callback for bna_enet_disable(), enet_stop() */
+	void (*stop_cbfn)(void *);
+	void *stop_cbarg;
+
+	/* Callback for bna_enet_pause_config() */
+	void (*pause_cbfn)(struct bnad *);
+
+	/* Callback for bna_enet_mtu_set() */
+	void (*mtu_cbfn)(struct bnad *);
+
+	struct bfa_wc chld_stop_wc;
+
+	struct bfa_msgq_cmd_entry msgq_cmd;
+	struct bfi_enet_set_pause_req pause_req;
+
+	struct bna *bna;
+};
+
+/**
+ *
+ * Ethport
+ *
+ */
+
+struct bna_ethport {
+	bfa_fsm_t fsm;
+	enum bna_ethport_flags flags;
+
+	enum bna_link_status link_status;
+
+	int rx_started_count;
+
+	void (*stop_cbfn)(struct bna_enet *);
+
+	void (*adminup_cbfn)(struct bnad *, enum bna_cb_status);
+
+	void (*link_cbfn)(struct bnad *, enum bna_link_status);
+
+	struct bfa_msgq_cmd_entry msgq_cmd;
+	union {
+		struct bfi_enet_enable_req admin_req;
+		struct bfi_enet_diag_lb_req lpbk_req;
+	} bfi_enet_cmd;
+
+	struct bna *bna;
+};
+
+/**
+ *
  * Interrupt Block
  *
  */
@@ -478,55 +596,20 @@ struct bna_ib_dbell {
 	u32 doorbell_ack;
 };
 
-/* Interrupt timer configuration */
-struct bna_ib_config {
-	u8		coalescing_timeo;    /* Unit is 5usec. */
-
-	int		interpkt_count;
-	int		interpkt_timeo;
-
-	enum ib_flags	ctrl_flags;
-};
-
 /* IB structure */
 struct bna_ib {
-	/* This should be the first one */
-	struct list_head	qe;
-
-	int			ib_id;
-
-	int			ref_count;
-	int			start_count;
-
 	struct bna_dma_addr ib_seg_host_addr;
 	void		*ib_seg_host_addr_kva;
-	u32		idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
-
-	struct bna_ibidx_seg *idx_seg;
 
 	struct bna_ib_dbell door_bell;
 
-	struct bna_intr *intr;
-
-	struct bna_ib_config ib_config;
-
-	struct bna *bna;
-};
-
-/* IB module - keeps track of IBs and interrupts */
-struct bna_ib_mod {
-	struct bna_ib *ib;		/* BFI_MAX_IB entries */
-	struct bna_intr *intr;		/* BFI_MAX_IB entries */
-	struct bna_ibidx_seg *idx_seg;	/* BNA_IBIDX_TOTAL_SEGS */
-
-	struct list_head ib_free_q;
-
-	struct list_head ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
+	enum bna_intr_type	intr_type;
+	int			intr_vector;
+
+	u8			coalescing_timeo;	/* Unit is 5usec. */
 
-	struct list_head intr_free_q;
-	struct list_head intr_active_q;
-
-	struct bna *bna;
+	int			interpkt_count;
+	int			interpkt_timeo;
 };
@@ -552,6 +635,7 @@ struct bna_tcb {
 	/* Control path */
 	struct bna_txq *txq;
 	struct bnad *bnad;
+	void			*priv; /* BNAD's cookie */
 	enum bna_intr_type intr_type;
 	int intr_vector;
 	u8 priority; /* Current priority */
@@ -565,68 +649,66 @@ struct bna_txq {
 	/* This should be the first one */
 	struct list_head qe;
 
-	int txq_id;
-
 	u8 priority;
 
 	struct bna_qpt qpt;
 	struct bna_tcb *tcb;
-	struct bna_ib *ib;
-	int ib_seg_offset;
+	struct bna_ib ib;
 
 	struct bna_tx *tx;
 
+	int hw_id;
+
 	u64 tx_packets;
 	u64 tx_bytes;
 };
 
-/* TxF structure (hardware Tx Function) */
-struct bna_txf {
-	int txf_id;
-	enum txf_flags ctrl_flags;
-	u16 vlan;
-};
-
 /* Tx object */
 struct bna_tx {
 	/* This should be the first one */
 	struct list_head qe;
+	int rid;
+	int hw_id;
 
 	bfa_fsm_t fsm;
 	enum bna_tx_flags flags;
 
 	enum bna_tx_type type;
+	int num_txq;
 
 	struct list_head txq_q;
-	struct bna_txf txf;
+	u16 txf_vlan_id;
 
 	/* Tx event handlers */
 	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
 	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
-	void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
-	void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
-	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+	void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
+	void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
+	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
 
 	/* callback for bna_tx_disable(), bna_tx_stop() */
-	void (*stop_cbfn)(void *arg, struct bna_tx *tx,
-			  enum bna_cb_status status);
+	void (*stop_cbfn)(void *arg, struct bna_tx *tx);
 	void *stop_cbarg;
 
 	/* callback for bna_tx_prio_set() */
-	void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
-				 enum bna_cb_status status);
+	void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
 
-	struct bfa_wc txq_stop_wc;
-
-	struct bna_mbox_qe mbox_qe;
+	struct bfa_msgq_cmd_entry msgq_cmd;
+	union {
+		struct bfi_enet_tx_cfg_req cfg_req;
+		struct bfi_enet_req req;
+		struct bfi_enet_tx_cfg_rsp cfg_rsp;
+	} bfi_enet_cmd;
 
 	struct bna *bna;
 	void *priv;	/* bnad's cookie */
 };
 
+/* Tx object configuration used during creation */
 struct bna_tx_config {
 	int num_txq;
 	int txq_depth;
+	int coalescing_timeo;
 	enum bna_tx_type tx_type;
 };
 
@@ -635,9 +717,9 @@ struct bna_tx_event_cbfn {
 	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
 	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
 	/* Mandatory */
-	void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
-	void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
-	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
+	void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *);
+	void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *);
+	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *);
 };
 
 /* Tx module - keeps track of free, active tx objects */
@@ -651,17 +733,19 @@ struct bna_tx_mod {
 	struct list_head txq_free_q;
 
 	/* callback for bna_tx_mod_stop() */
-	void (*stop_cbfn)(struct bna_port *port,
-			  enum bna_cb_status status);
+	void (*stop_cbfn)(struct bna_enet *enet);
 
 	struct bfa_wc tx_stop_wc;
 
 	enum bna_tx_mod_flags flags;
 
-	int priority;
-	int cee_link;
+	u8 prio_map;
+	int default_prio;
+	int iscsi_over_cee;
+	int iscsi_prio;
+	int prio_reconfigured;
 
-	u32 txf_bmap[2];
+	u32 rid_mask;
 
 	struct bna *bna;
 };
@@ -693,13 +777,6 @@ struct bna_rit_segment {
 	struct bna_rit_entry *rit;
 };
 
-struct bna_rit_mod {
-	struct bna_rit_entry *rit;
-	struct bna_rit_segment *rit_segment;
-
-	struct list_head rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
-};
-
 /**
  *
  * Rx object
@@ -719,8 +796,9 @@ struct bna_rcb {
 	int page_count;
 	/* Control path */
 	struct bna_rxq *rxq;
-	struct bna_cq *cq;
+	struct bna_ccb *ccb;
 	struct bnad *bnad;
+	void			*priv; /* BNAD's cookie */
 	unsigned long flags;
 	int id;
 };
@@ -728,7 +806,6 @@ struct bna_rcb {
 /* RxQ structure - QPT, configuration */
 struct bna_rxq {
 	struct list_head qe;
-	int rxq_id;
 
 	int buffer_size;
 	int q_depth;
@@ -739,6 +816,8 @@ struct bna_rxq {
 	struct bna_rxp *rxp;
 	struct bna_rx *rx;
 
+	int hw_id;
+
 	u64 rx_packets;
 	u64 rx_bytes;
 	u64 rx_packets_with_error;
@@ -784,6 +863,7 @@ struct bna_ccb {
 	/* Control path */
 	struct bna_cq *cq;
 	struct bnad *bnad;
+	void			*priv; /* BNAD's cookie */
 	enum bna_intr_type intr_type;
 	int intr_vector;
 	u8 rx_coalescing_timeo; /* For NAPI */
@@ -793,46 +873,43 @@ struct bna_ccb {
 
 /* CQ QPT, configuration */
 struct bna_cq {
-	int cq_id;
-
 	struct bna_qpt qpt;
 	struct bna_ccb *ccb;
 
-	struct bna_ib *ib;
-	u8 ib_seg_offset;
+	struct bna_ib ib;
 
 	struct bna_rx *rx;
 };
 
 struct bna_rss_config {
-	enum rss_hash_type hash_type;
+	enum bfi_enet_rss_type hash_type;
 	u8 hash_mask;
-	u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+	u32 toeplitz_hash_key[BFI_ENET_RSS_KEY_LEN];
 };
 
 struct bna_hds_config {
-	enum hds_header_type hdr_type;
-	int header_size;
+	enum bfi_enet_hds_type hdr_type;
+	int forced_offset;
 };
 
-/* This structure is used during RX creation */
+/* Rx object configuration used during creation */
 struct bna_rx_config {
 	enum bna_rx_type rx_type;
 	int num_paths;
 	enum bna_rxp_type rxp_type;
 	int paused;
 	int q_depth;
+	int coalescing_timeo;
 	/*
 	 * Small/Large (or Header/Data) buffer size to be configured
 	 * for SLR and HDS queue type. Large buffer size comes from
-	 * port->mtu.
+	 * enet->mtu.
 	 */
 	int small_buff_size;
 
 	enum bna_status rss_status;
 	struct bna_rss_config rss_config;
 
-	enum bna_status hds_status;
 	struct bna_hds_config hds_config;
 
 	enum bna_status vlan_strip_status;
@@ -851,51 +928,35 @@ struct bna_rxp {
 
 	/* MSI-x vector number for configuring RSS */
 	int vector;
-
-	struct bna_mbox_qe mbox_qe;
-};
-
-/* HDS configuration structure */
-struct bna_rxf_hds {
-	enum hds_header_type hdr_type;
-	int header_size;
-};
-
-/* RSS configuration structure */
-struct bna_rxf_rss {
-	enum rss_hash_type hash_type;
-	u8 hash_mask;
-	u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
+	int hw_id;
 };
 
 /* RxF structure (hardware Rx Function) */
 struct bna_rxf {
 	bfa_fsm_t fsm;
-	int rxf_id;
-	enum rxf_flags ctrl_flags;
-	u16 default_vlan_tag;
-	enum bna_rxf_oper_state rxf_oper_state;
-	enum bna_status hds_status;
-	struct bna_rxf_hds hds_cfg;
-	enum bna_status rss_status;
-	struct bna_rxf_rss rss_cfg;
-	struct bna_rit_segment *rit_segment;
-	struct bna_rx *rx;
-	u32 forced_offset;
-	struct bna_mbox_qe mbox_qe;
-	int mcast_rxq_id;
+	enum bna_rxf_flags flags;
+
+	struct bfa_msgq_cmd_entry msgq_cmd;
+	union {
+		struct bfi_enet_enable_req req;
+		struct bfi_enet_rss_cfg_req rss_req;
+		struct bfi_enet_rit_req rit_req;
+		struct bfi_enet_rx_vlan_req vlan_req;
+		struct bfi_enet_mcast_add_req mcast_add_req;
+		struct bfi_enet_mcast_del_req mcast_del_req;
+		struct bfi_enet_ucast_req ucast_req;
+	} bfi_enet_cmd;
 
 	/* callback for bna_rxf_start() */
-	void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+	void (*start_cbfn) (struct bna_rx *rx);
 	struct bna_rx *start_cbarg;
 
 	/* callback for bna_rxf_stop() */
-	void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
+	void (*stop_cbfn) (struct bna_rx *rx);
 	struct bna_rx *stop_cbarg;
 
-	/* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
-	void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
-			enum bna_cb_status status);
+	/* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
+	void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
 	struct bnad *oper_state_cbarg;
 
 	/**
@@ -905,25 +966,25 @@ struct bna_rxf {
 	 * bna_rxf_{ucast/mcast}_del(),
 	 * bna_rxf_mode_set()
 	 */
-	void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
-			enum bna_cb_status status);
+	void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx);
 	struct bnad *cam_fltr_cbarg;
 
-	enum bna_rxf_flags rxf_flags;
-
 	/* List of unicast addresses yet to be applied to h/w */
 	struct list_head ucast_pending_add_q;
 	struct list_head ucast_pending_del_q;
+	struct bna_mac *ucast_pending_mac;
 	int ucast_pending_set;
 	/* ucast addresses applied to the h/w */
 	struct list_head ucast_active_q;
-	struct bna_mac *ucast_active_mac;
+	struct bna_mac ucast_active_mac;
+	int ucast_active_set;
 
 	/* List of multicast addresses yet to be applied to h/w */
 	struct list_head mcast_pending_add_q;
 	struct list_head mcast_pending_del_q;
 	/* multicast addresses applied to the h/w */
 	struct list_head mcast_active_q;
+	struct list_head mcast_handle_q;
 
 	/* Rx modes yet to be applied to h/w */
 	enum bna_rxmode rxmode_pending;
@@ -931,41 +992,58 @@ struct bna_rxf {
 	/* Rx modes applied to h/w */
 	enum bna_rxmode rxmode_active;
 
+	u8 vlan_pending_bitmask;
 	enum bna_status vlan_filter_status;
-	u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
+	u32 vlan_filter_table[(BFI_ENET_VLAN_ID_MAX) / 32];
+	bool vlan_strip_pending;
+	enum bna_status vlan_strip_status;
+
+	enum bna_rss_flags rss_pending;
+	enum bna_status rss_status;
+	struct bna_rss_config rss_cfg;
+	u8 *rit;
+	int rit_size;
+
+	struct bna_rx *rx;
 };
 
 /* Rx object */
 struct bna_rx {
 	/* This should be the first one */
 	struct list_head qe;
+	int rid;
+	int hw_id;
 
 	bfa_fsm_t fsm;
 
 	enum bna_rx_type type;
 
-	/* list-head for RX path objects */
+	int num_paths;
 	struct list_head rxp_q;
 
+	struct bna_hds_config hds_cfg;
+
 	struct bna_rxf rxf;
 
 	enum bna_rx_flags rx_flags;
 
-	struct bna_mbox_qe mbox_qe;
-
-	struct bfa_wc rxq_stop_wc;
+	struct bfa_msgq_cmd_entry msgq_cmd;
+	union {
+		struct bfi_enet_rx_cfg_req cfg_req;
+		struct bfi_enet_req req;
+		struct bfi_enet_rx_cfg_rsp cfg_rsp;
+	} bfi_enet_cmd;
 
 	/* Rx event handlers */
 	void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
 	void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
 	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
 	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
-	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
-	void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
+	void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
 
 	/* callback for bna_rx_disable(), bna_rx_stop() */
-	void (*stop_cbfn)(void *arg, struct bna_rx *rx,
-			enum bna_cb_status status);
+	void (*stop_cbfn)(void *arg, struct bna_rx *rx);
 	void *stop_cbarg;
 
 	struct bna *bna;
@@ -979,8 +1057,8 @@ struct bna_rx_event_cbfn {
 	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
 	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
 	/* Mandatory */
-	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
-	void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
+	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
+	void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
 };
 
 /* Rx module - keeps track of free, active rx objects */
@@ -1003,12 +1081,11 @@ struct bna_rx_mod {
 	enum bna_rx_mod_flags flags;
 
 	/* callback for bna_rx_mod_stop() */
-	void (*stop_cbfn)(struct bna_port *port,
-			enum bna_cb_status status);
+	void (*stop_cbfn)(struct bna_enet *enet);
 
 	struct bfa_wc rx_stop_wc;
 	u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
-	u32 rxf_bmap[2];
+	u32 rid_mask;
 };
 
 /**
@@ -1024,9 +1101,18 @@ struct bna_ucam_mod {
 	struct bna *bna;
 };
 
+struct bna_mcam_handle {
+	/* This should be the first one */
+	struct list_head qe;
+	int handle;
+	int refcnt;
+};
+
 struct bna_mcam_mod {
 	struct bna_mac *mcmac;		/* BFI_MAX_MCMAC entries */
+	struct bna_mcam_handle *mchandle;	/* BFI_MAX_MCMAC entries */
 	struct list_head free_q;
+	struct list_head free_handle_q;
 
 	struct bna *bna;
 };
@@ -1059,7 +1145,6 @@ struct bna_rx_stats {
 	int num_active_mcast;
 	int rxmode_active;
 	int vlan_filter_status;
-	u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
 	int rss_status;
 	int hds_status;
 };
@@ -1072,15 +1157,22 @@ struct bna_sw_stats {
 	int priority;
 	int num_active_tx;
 	int num_active_rx;
-	struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
-	struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
 };
 
 struct bna_stats {
-	u32 txf_bmap[2];
-	u32 rxf_bmap[2];
-	struct bfi_ll_stats *hw_stats;
-	struct bna_sw_stats *sw_stats;
+	struct bna_dma_addr hw_stats_dma;
+	struct bfi_enet_stats *hw_stats_kva;
+	struct bfi_enet_stats hw_stats;
+};
+
+struct bna_stats_mod {
+	bool ioc_ready;
+	bool stats_get_busy;
+	bool stats_clr_busy;
+	struct bfa_msgq_cmd_entry stats_get_cmd;
+	struct bfa_msgq_cmd_entry stats_clr_cmd;
+	struct bfi_enet_stats_req stats_get;
+	struct bfi_enet_stats_req stats_clr;
 };
@@ -1090,38 +1182,32 @@ struct bna_stats {
  */
 
 struct bna {
+	struct bna_ident ident;
 	struct bfa_pcidev pcidev;
 
-	int port_num;
-
-	struct bna_chip_regs regs;
-
-	struct bna_dma_addr hw_stats_dma;
+	struct bna_reg regs;
+	struct bna_bit_defn bits;
+
 	struct bna_stats stats;
 
-	struct bna_device device;
+	struct bna_ioceth ioceth;
 	struct bfa_cee cee;
+	struct bfa_msgq msgq;
 
-	struct bna_mbox_mod mbox_mod;
-
-	struct bna_port port;
+	struct bna_ethport ethport;
+	struct bna_enet enet;
+	struct bna_stats_mod stats_mod;
 
 	struct bna_tx_mod tx_mod;
-
 	struct bna_rx_mod rx_mod;
-
-	struct bna_ib_mod ib_mod;
-
 	struct bna_ucam_mod ucam_mod;
 	struct bna_mcam_mod mcam_mod;
 
-	struct bna_rit_mod rit_mod;
-
-	int rxf_promisc_id;
+	enum bna_mod_flags mod_flags;
 
-	struct bna_mbox_qe mbox_qe;
+	int default_mode_rid;
+	int promisc_rid;
 
 	struct bnad *bnad;
 };
-
 #endif /* __BNA_TYPES_H__ */
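With the txf/rxf id bitmaps replaced by per-object resource ids (rid) and a per-module rid_mask, FW responses can be routed back to their Tx/Rx object with the bna_tx_from_rid()/bna_rx_from_rid() lookup macros from bna.h. A hypothetical dispatcher, illustrative only (extracting the rid from the message header is elided here):

static void example_dispatch_tx_start_rsp(struct bna *bna,
				struct bfi_msgq_mhdr *msghdr, int rid)
{
	struct bna_tx *tx;

	bna_tx_from_rid(bna, rid, tx);	/* walks tx_mod.tx_active_q */
	if (tx)
		bna_bfi_tx_enet_start_rsp(tx, msghdr);
}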
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 8e35b2596f93..5ad07eab7bec 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -441,11 +441,15 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
 	u32 flags, unmap_cons;
-	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+
+	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
 
-	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
+	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
+		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
 		return 0;
+	}
 
 	prefetch(bnad->netdev);
 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -455,10 +459,10 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
455 packets++; 459 packets++;
456 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); 460 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
457 461
458 if (qid0 == cmpl->rxq_id) 462 if (bna_is_small_rxq(cmpl->rxq_id))
459 rcb = ccb->rcb[0];
460 else
461 rcb = ccb->rcb[1]; 463 rcb = ccb->rcb[1];
464 else
465 rcb = ccb->rcb[0];
462 466
463 unmap_q = rcb->unmap_q; 467 unmap_q = rcb->unmap_q;
464 unmap_array = unmap_q->unmap_array; 468 unmap_array = unmap_q->unmap_array;
@@ -518,12 +522,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
518 if (flags & BNA_CQ_EF_VLAN) 522 if (flags & BNA_CQ_EF_VLAN)
519 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag)); 523 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
520 524
521 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 525 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
522 struct bnad_rx_ctrl *rx_ctrl;
523
524 rx_ctrl = (struct bnad_rx_ctrl *) ccb->ctrl;
525 napi_gro_receive(&rx_ctrl->napi, skb); 526 napi_gro_receive(&rx_ctrl->napi, skb);
526 } else { 527 else {
527 netif_receive_skb(skb); 528 netif_receive_skb(skb);
528 } 529 }
529 530
@@ -545,6 +546,8 @@ next:
545 bna_ib_ack(ccb->i_dbell, 0); 546 bna_ib_ack(ccb->i_dbell, 0);
546 } 547 }
547 548
549 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
550
548 return packets; 551 return packets;
549} 552}
550 553
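
The poll routine now brackets its fast path with an "in Rx path" bit: set on entry, cleared on every exit, so the cleanup path (reworked later in this patch) can spin until the bit drops before tearing anything down. A minimal userspace sketch of that handshake in C11 atomics; rxq_started(), process_completions(), stop_rxq() and free_rx_buffers() are hypothetical stand-ins for driver state:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint rx_flags;
#define FP_IN_RX_PATH (1u << 0)

static bool rxq_started(void)          { return true; }  /* stub */
static int  process_completions(int b) { return b; }     /* stub */
static void stop_rxq(void)             { }               /* stub */
static void free_rx_buffers(void)      { }               /* stub */

static int rx_poll(int budget)
{
	atomic_fetch_or(&rx_flags, FP_IN_RX_PATH);       /* entering poll */
	if (!rxq_started()) {
		atomic_fetch_and(&rx_flags, ~FP_IN_RX_PATH);
		return 0;                                /* queue stopped: bail */
	}
	int packets = process_completions(budget);
	atomic_fetch_and(&rx_flags, ~FP_IN_RX_PATH);     /* leaving poll */
	return packets;
}

static void rx_cleanup(void)
{
	stop_rxq();                                      /* future polls bail */
	while (atomic_load(&rx_flags) & FP_IN_RX_PATH)
		;                                        /* drain in-flight poll */
	free_rx_buffers();                               /* now safe */
}
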
@@ -611,7 +614,7 @@ bnad_msix_mbox_handler(int irq, void *data)
611 614
612 bna_intr_status_get(&bnad->bna, intr_status); 615 bna_intr_status_get(&bnad->bna, intr_status);
613 616
614 if (BNA_IS_MBOX_ERR_INTR(intr_status)) 617 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
615 bna_mbox_handler(&bnad->bna, intr_status); 618 bna_mbox_handler(&bnad->bna, intr_status);
616 619
617 spin_unlock_irqrestore(&bnad->bna_lock, flags); 620 spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -628,6 +631,7 @@ bnad_isr(int irq, void *data)
628 struct bnad *bnad = (struct bnad *)data; 631 struct bnad *bnad = (struct bnad *)data;
629 struct bnad_rx_info *rx_info; 632 struct bnad_rx_info *rx_info;
630 struct bnad_rx_ctrl *rx_ctrl; 633 struct bnad_rx_ctrl *rx_ctrl;
634 struct bna_tcb *tcb = NULL;
631 635
632 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) 636 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
633 return IRQ_NONE; 637 return IRQ_NONE;
@@ -639,7 +643,7 @@ bnad_isr(int irq, void *data)
639 643
640 spin_lock_irqsave(&bnad->bna_lock, flags); 644 spin_lock_irqsave(&bnad->bna_lock, flags);
641 645
642 if (BNA_IS_MBOX_ERR_INTR(intr_status)) 646 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
643 bna_mbox_handler(&bnad->bna, intr_status); 647 bna_mbox_handler(&bnad->bna, intr_status);
644 648
645 spin_unlock_irqrestore(&bnad->bna_lock, flags); 649 spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -650,8 +654,11 @@ bnad_isr(int irq, void *data)
650 /* Process data interrupts */ 654 /* Process data interrupts */
651 /* Tx processing */ 655 /* Tx processing */
652 for (i = 0; i < bnad->num_tx; i++) { 656 for (i = 0; i < bnad->num_tx; i++) {
653 for (j = 0; j < bnad->num_txq_per_tx; j++) 657 for (j = 0; j < bnad->num_txq_per_tx; j++) {
654 bnad_tx(bnad, bnad->tx_info[i].tcb[j]); 658 tcb = bnad->tx_info[i].tcb[j];
659 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
660 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
661 }
655 } 662 }
656 /* Rx processing */ 663 /* Rx processing */
657 for (i = 0; i < bnad->num_rx; i++) { 664 for (i = 0; i < bnad->num_rx; i++) {
@@ -706,43 +713,49 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
706 713
707/* Callbacks */ 714/* Callbacks */
708void 715void
709bnad_cb_device_enable_mbox_intr(struct bnad *bnad) 716bnad_cb_mbox_intr_enable(struct bnad *bnad)
710{ 717{
711 bnad_enable_mbox_irq(bnad); 718 bnad_enable_mbox_irq(bnad);
712} 719}
713 720
714void 721void
715bnad_cb_device_disable_mbox_intr(struct bnad *bnad) 722bnad_cb_mbox_intr_disable(struct bnad *bnad)
716{ 723{
717 bnad_disable_mbox_irq(bnad); 724 bnad_disable_mbox_irq(bnad);
718} 725}
719 726
720void 727void
721bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status) 728bnad_cb_ioceth_ready(struct bnad *bnad)
729{
730 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
731 complete(&bnad->bnad_completions.ioc_comp);
732}
733
734void
735bnad_cb_ioceth_failed(struct bnad *bnad)
722{ 736{
737 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
723 complete(&bnad->bnad_completions.ioc_comp); 738 complete(&bnad->bnad_completions.ioc_comp);
724 bnad->bnad_completions.ioc_comp_status = status;
725} 739}
726 740
727void 741void
728bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status) 742bnad_cb_ioceth_disabled(struct bnad *bnad)
729{ 743{
744 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
730 complete(&bnad->bnad_completions.ioc_comp); 745 complete(&bnad->bnad_completions.ioc_comp);
731 bnad->bnad_completions.ioc_comp_status = status;
732} 746}
733 747
734static void 748static void
735bnad_cb_port_disabled(void *arg, enum bna_cb_status status) 749bnad_cb_enet_disabled(void *arg)
736{ 750{
737 struct bnad *bnad = (struct bnad *)arg; 751 struct bnad *bnad = (struct bnad *)arg;
738 752
739 complete(&bnad->bnad_completions.port_comp);
740
741 netif_carrier_off(bnad->netdev); 753 netif_carrier_off(bnad->netdev);
754 complete(&bnad->bnad_completions.enet_comp);
742} 755}
743 756
744void 757void
745bnad_cb_port_link_status(struct bnad *bnad, 758bnad_cb_ethport_link_status(struct bnad *bnad,
746 enum bna_link_status link_status) 759 enum bna_link_status link_status)
747{ 760{
748 bool link_up = 0; 761 bool link_up = 0;
@@ -750,34 +763,60 @@ bnad_cb_port_link_status(struct bnad *bnad,
750 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP); 763 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
751 764
752 if (link_status == BNA_CEE_UP) { 765 if (link_status == BNA_CEE_UP) {
766 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
767 BNAD_UPDATE_CTR(bnad, cee_toggle);
753 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); 768 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
754 BNAD_UPDATE_CTR(bnad, cee_up); 769 } else {
755 } else 770 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
771 BNAD_UPDATE_CTR(bnad, cee_toggle);
756 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); 772 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
773 }
757 774
758 if (link_up) { 775 if (link_up) {
759 if (!netif_carrier_ok(bnad->netdev)) { 776 if (!netif_carrier_ok(bnad->netdev)) {
760 struct bna_tcb *tcb = bnad->tx_info[0].tcb[0]; 777 uint tx_id, tcb_id;
761 if (!tcb) 778 printk(KERN_WARNING "bna: %s link up\n",
762 return;
763 pr_warn("bna: %s link up\n",
764 bnad->netdev->name); 779 bnad->netdev->name);
765 netif_carrier_on(bnad->netdev); 780 netif_carrier_on(bnad->netdev);
766 BNAD_UPDATE_CTR(bnad, link_toggle); 781 BNAD_UPDATE_CTR(bnad, link_toggle);
767 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { 782 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
768 /* Force an immediate Transmit Schedule */ 783 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
769 pr_info("bna: %s TX_STARTED\n", 784 tcb_id++) {
770 bnad->netdev->name); 785 struct bna_tcb *tcb =
771 netif_wake_queue(bnad->netdev); 786 bnad->tx_info[tx_id].tcb[tcb_id];
772 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); 787 u32 txq_id;
773 } else { 788 if (!tcb)
774 netif_stop_queue(bnad->netdev); 789 continue;
775 BNAD_UPDATE_CTR(bnad, netif_queue_stop); 790
791 txq_id = tcb->id;
792
793 if (test_bit(BNAD_TXQ_TX_STARTED,
794 &tcb->flags)) {
795 /*
796 * Force an immediate
797 * Transmit Schedule */
798 printk(KERN_INFO "bna: %s %d "
799 "TXQ_STARTED\n",
800 bnad->netdev->name,
801 txq_id);
802 netif_wake_subqueue(
803 bnad->netdev,
804 txq_id);
805 BNAD_UPDATE_CTR(bnad,
806 netif_queue_wakeup);
807 } else {
808 netif_stop_subqueue(
809 bnad->netdev,
810 txq_id);
811 BNAD_UPDATE_CTR(bnad,
812 netif_queue_stop);
813 }
814 }
776 } 815 }
777 } 816 }
778 } else { 817 } else {
779 if (netif_carrier_ok(bnad->netdev)) { 818 if (netif_carrier_ok(bnad->netdev)) {
780 pr_warn("bna: %s link down\n", 819 printk(KERN_WARNING "bna: %s link down\n",
781 bnad->netdev->name); 820 bnad->netdev->name);
782 netif_carrier_off(bnad->netdev); 821 netif_carrier_off(bnad->netdev);
783 BNAD_UPDATE_CTR(bnad, link_toggle); 822 BNAD_UPDATE_CTR(bnad, link_toggle);
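
Link-up handling now walks every tcb and wakes or stops each subqueue individually instead of toggling a single device queue. The loop shape, reduced to a sketch with hypothetical wake_subqueue()/stop_subqueue() helpers standing in for the netif_*_subqueue() calls:

#include <stdbool.h>

#define MAX_TXQ_PER_TX 8

struct tcb { unsigned id; bool started; };
struct tx_info { struct tcb *tcb[MAX_TXQ_PER_TX]; };

static void wake_subqueue(unsigned id) { (void)id; }  /* stand-in */
static void stop_subqueue(unsigned id) { (void)id; }  /* stand-in */

static void link_up_requeue(struct tx_info *tx, int num_tx, int txq_per_tx)
{
	for (int i = 0; i < num_tx; i++) {
		for (int j = 0; j < txq_per_tx; j++) {
			struct tcb *t = tx[i].tcb[j];

			if (!t)
				continue;             /* queue never created */
			if (t->started)
				wake_subqueue(t->id); /* carrier is back */
			else
				stop_subqueue(t->id); /* still held down */
		}
	}
}
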
@@ -786,8 +825,7 @@ bnad_cb_port_link_status(struct bnad *bnad,
786} 825}
787 826
788static void 827static void
789bnad_cb_tx_disabled(void *arg, struct bna_tx *tx, 828bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
790 enum bna_cb_status status)
791{ 829{
792 struct bnad *bnad = (struct bnad *)arg; 830 struct bnad *bnad = (struct bnad *)arg;
793 831
@@ -864,108 +902,166 @@ bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
864} 902}
865 903
866static void 904static void
867bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb) 905bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
868{ 906{
869 struct bnad_tx_info *tx_info = 907 struct bnad_tx_info *tx_info =
870 (struct bnad_tx_info *)tcb->txq->tx->priv; 908 (struct bnad_tx_info *)tx->priv;
871 909 struct bna_tcb *tcb;
872 if (tx_info != &bnad->tx_info[0]) 910 u32 txq_id;
873 return; 911 int i;
874 912
875 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); 913 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
876 netif_stop_queue(bnad->netdev); 914 tcb = tx_info->tcb[i];
877 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name); 915 if (!tcb)
916 continue;
917 txq_id = tcb->id;
918 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
919 netif_stop_subqueue(bnad->netdev, txq_id);
920 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
921 bnad->netdev->name, txq_id);
922 }
878} 923}
879 924
880static void 925static void
881bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb) 926bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
882{ 927{
883 struct bnad_unmap_q *unmap_q = tcb->unmap_q; 928 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
929 struct bna_tcb *tcb;
930 struct bnad_unmap_q *unmap_q;
931 u32 txq_id;
932 int i;
884 933
885 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 934 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
886 return; 935 tcb = tx_info->tcb[i];
936 if (!tcb)
937 continue;
938 txq_id = tcb->id;
887 939
888 clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags); 940 unmap_q = tcb->unmap_q;
889 941
890 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) 942 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
891 cpu_relax(); 943 continue;
892 944
893 bnad_free_all_txbufs(bnad, tcb); 945 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
946 cpu_relax();
894 947
895 unmap_q->producer_index = 0; 948 bnad_free_all_txbufs(bnad, tcb);
896 unmap_q->consumer_index = 0;
897 949
898 smp_mb__before_clear_bit(); 950 unmap_q->producer_index = 0;
899 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 951 unmap_q->consumer_index = 0;
952
953 smp_mb__before_clear_bit();
954 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
955
956 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
957
958 if (netif_carrier_ok(bnad->netdev)) {
959 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
960 bnad->netdev->name, txq_id);
961 netif_wake_subqueue(bnad->netdev, txq_id);
962 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
963 }
964 }
900 965
901 /* 966 /*
902 * Workaround for first device enable failure & we 967 * Workaround for first ioceth enable failure & we
903 * get a 0 MAC address. We try to get the MAC address 968 * get a 0 MAC address. We try to get the MAC address
904 * again here. 969 * again here.
905 */ 970 */
906 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) { 971 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
907 bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr); 972 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
908 bnad_set_netdev_perm_addr(bnad); 973 bnad_set_netdev_perm_addr(bnad);
909 } 974 }
910
911 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
912
913 if (netif_carrier_ok(bnad->netdev)) {
914 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
915 netif_wake_queue(bnad->netdev);
916 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
917 }
918} 975}
919 976
920static void 977static void
921bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb) 978bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
922{ 979{
923 /* Delay only once for the whole Tx Path Shutdown */ 980 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
924 if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags)) 981 struct bna_tcb *tcb;
925 mdelay(BNAD_TXRX_SYNC_MDELAY); 982 int i;
983
984 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
985 tcb = tx_info->tcb[i];
986 if (!tcb)
987 continue;
988 }
989
990 mdelay(BNAD_TXRX_SYNC_MDELAY);
991 bna_tx_cleanup_complete(tx);
926} 992}
927 993
928static void 994static void
929bnad_cb_rx_cleanup(struct bnad *bnad, 995bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
930 struct bna_ccb *ccb)
931{ 996{
932 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); 997 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
998 struct bna_ccb *ccb;
999 struct bnad_rx_ctrl *rx_ctrl;
1000 int i;
1001
1002 mdelay(BNAD_TXRX_SYNC_MDELAY);
1003
1004 for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
1005 rx_ctrl = &rx_info->rx_ctrl[i];
1006 ccb = rx_ctrl->ccb;
1007 if (!ccb)
1008 continue;
1009
1010 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1011
1012 if (ccb->rcb[1])
1013 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
933 1014
934 if (ccb->rcb[1]) 1015 while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
935 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); 1016 cpu_relax();
1017 }
936 1018
937 if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags)) 1019 bna_rx_cleanup_complete(rx);
938 mdelay(BNAD_TXRX_SYNC_MDELAY);
939} 1020}
940 1021
941static void 1022static void
942bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb) 1023bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
943{ 1024{
944 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 1025 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
945 1026 struct bna_ccb *ccb;
946 clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags); 1027 struct bna_rcb *rcb;
947 1028 struct bnad_rx_ctrl *rx_ctrl;
948 if (rcb == rcb->cq->ccb->rcb[0]) 1029 struct bnad_unmap_q *unmap_q;
949 bnad_cq_cmpl_init(bnad, rcb->cq->ccb); 1030 int i;
1031 int j;
950 1032
951 bnad_free_all_rxbufs(bnad, rcb); 1033 for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
1034 rx_ctrl = &rx_info->rx_ctrl[i];
1035 ccb = rx_ctrl->ccb;
1036 if (!ccb)
1037 continue;
952 1038
953 set_bit(BNAD_RXQ_STARTED, &rcb->flags); 1039 bnad_cq_cmpl_init(bnad, ccb);
954 1040
955 /* Now allocate & post buffers for this RCB */ 1041 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
956 /* !!Allocation in callback context */ 1042 rcb = ccb->rcb[j];
957 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { 1043 if (!rcb)
958 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) 1044 continue;
959 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) 1045 bnad_free_all_rxbufs(bnad, rcb);
960 bnad_alloc_n_post_rxbufs(bnad, rcb); 1046
961 smp_mb__before_clear_bit(); 1047 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
962 clear_bit(BNAD_RXQ_REFILL, &rcb->flags); 1048 unmap_q = rcb->unmap_q;
1049
1050 /* Now allocate & post buffers for this RCB */
1051 /* !!Allocation in callback context */
1052 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1053 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1054 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1055 bnad_alloc_n_post_rxbufs(bnad, rcb);
1056 smp_mb__before_clear_bit();
1057 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1058 }
1059 }
963 } 1060 }
964} 1061}
965 1062
966static void 1063static void
967bnad_cb_rx_disabled(void *arg, struct bna_rx *rx, 1064bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
968 enum bna_cb_status status)
969{ 1065{
970 struct bnad *bnad = (struct bnad *)arg; 1066 struct bnad *bnad = (struct bnad *)arg;
971 1067
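
bnad_cb_tx_resume() above reuses the BNAD_TXQ_FREE_SENT bit as a one-bit ownership lock around the unmap-queue reset: spin on test_and_set_bit(), do the work, then clear with a barrier so the index resets are visible before the bit drops. The same idea in portable C11 atomics, a sketch rather than the kernel primitives:

#include <stdatomic.h>

static atomic_flag free_sent = ATOMIC_FLAG_INIT;

static void reclaim_txq(void)
{
	while (atomic_flag_test_and_set_explicit(&free_sent,
						 memory_order_acquire))
		;   /* another path is reclaiming; spin like cpu_relax() */

	/* ... free all tx buffers, zero producer/consumer indices ... */

	/* release ordering plays the role of smp_mb__before_clear_bit() */
	atomic_flag_clear_explicit(&free_sent, memory_order_release);
}
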
@@ -973,10 +1069,9 @@ bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
973} 1069}
974 1070
975static void 1071static void
976bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx, 1072bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
977 enum bna_cb_status status)
978{ 1073{
979 bnad->bnad_completions.mcast_comp_status = status; 1074 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
980 complete(&bnad->bnad_completions.mcast_comp); 1075 complete(&bnad->bnad_completions.mcast_comp);
981} 1076}
982 1077
@@ -995,6 +1090,13 @@ bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
995 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); 1090 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
996} 1091}
997 1092
1093static void
1094bnad_cb_enet_mtu_set(struct bnad *bnad)
1095{
1096 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1097 complete(&bnad->bnad_completions.mtu_comp);
1098}
1099
998/* Resource allocation, free functions */ 1100/* Resource allocation, free functions */
999 1101
1000static void 1102static void
@@ -1073,23 +1175,17 @@ err_return:
1073 1175
1074/* Free IRQ for Mailbox */ 1176/* Free IRQ for Mailbox */
1075static void 1177static void
1076bnad_mbox_irq_free(struct bnad *bnad, 1178bnad_mbox_irq_free(struct bnad *bnad)
1077 struct bna_intr_info *intr_info)
1078{ 1179{
1079 int irq; 1180 int irq;
1080 unsigned long flags; 1181 unsigned long flags;
1081 1182
1082 if (intr_info->idl == NULL)
1083 return;
1084
1085 spin_lock_irqsave(&bnad->bna_lock, flags); 1183 spin_lock_irqsave(&bnad->bna_lock, flags);
1086 bnad_disable_mbox_irq(bnad); 1184 bnad_disable_mbox_irq(bnad);
1087 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1185 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1088 1186
1089 irq = BNAD_GET_MBOX_IRQ(bnad); 1187 irq = BNAD_GET_MBOX_IRQ(bnad);
1090 free_irq(irq, bnad); 1188 free_irq(irq, bnad);
1091
1092 kfree(intr_info->idl);
1093} 1189}
1094 1190
1095/* 1191/*
@@ -1098,32 +1194,22 @@ bnad_mbox_irq_free(struct bnad *bnad,
1098 * from bna 1194 * from bna
1099 */ 1195 */
1100static int 1196static int
1101bnad_mbox_irq_alloc(struct bnad *bnad, 1197bnad_mbox_irq_alloc(struct bnad *bnad)
1102 struct bna_intr_info *intr_info)
1103{ 1198{
1104 int err = 0; 1199 int err = 0;
1105 unsigned long irq_flags, flags; 1200 unsigned long irq_flags, flags;
1106 u32 irq; 1201 u32 irq;
1107 irq_handler_t irq_handler; 1202 irq_handler_t irq_handler;
1108 1203
1109 /* Mbox should use only 1 vector */
1110
1111 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1112 if (!intr_info->idl)
1113 return -ENOMEM;
1114
1115 spin_lock_irqsave(&bnad->bna_lock, flags); 1204 spin_lock_irqsave(&bnad->bna_lock, flags);
1116 if (bnad->cfg_flags & BNAD_CF_MSIX) { 1205 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1117 irq_handler = (irq_handler_t)bnad_msix_mbox_handler; 1206 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1118 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; 1207 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1119 irq_flags = 0; 1208 irq_flags = 0;
1120 intr_info->intr_type = BNA_INTR_T_MSIX;
1121 intr_info->idl[0].vector = BNAD_MAILBOX_MSIX_INDEX;
1122 } else { 1209 } else {
1123 irq_handler = (irq_handler_t)bnad_isr; 1210 irq_handler = (irq_handler_t)bnad_isr;
1124 irq = bnad->pcidev->irq; 1211 irq = bnad->pcidev->irq;
1125 irq_flags = IRQF_SHARED; 1212 irq_flags = IRQF_SHARED;
1126 intr_info->intr_type = BNA_INTR_T_INTX;
1127 } 1213 }
1128 1214
1129 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1215 spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1140,11 +1226,6 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
1140 err = request_irq(irq, irq_handler, irq_flags, 1226 err = request_irq(irq, irq_handler, irq_flags,
1141 bnad->mbox_irq_name, bnad); 1227 bnad->mbox_irq_name, bnad);
1142 1228
1143 if (err) {
1144 kfree(intr_info->idl);
1145 intr_info->idl = NULL;
1146 }
1147
1148 return err; 1229 return err;
1149} 1230}
1150 1231
@@ -1158,7 +1239,7 @@ bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1158/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */ 1239/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1159static int 1240static int
1160bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, 1241bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1161 uint txrx_id, struct bna_intr_info *intr_info) 1242 u32 txrx_id, struct bna_intr_info *intr_info)
1162{ 1243{
1163 int i, vector_start = 0; 1244 int i, vector_start = 0;
1164 u32 cfg_flags; 1245 u32 cfg_flags;
@@ -1241,7 +1322,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1241 */ 1322 */
1242static int 1323static int
1243bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, 1324bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1244 uint tx_id, int num_txqs) 1325 u32 tx_id, int num_txqs)
1245{ 1326{
1246 int i; 1327 int i;
1247 int err; 1328 int err;
@@ -1294,7 +1375,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1294 */ 1375 */
1295static int 1376static int
1296bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, 1377bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1297 uint rx_id, int num_rxps) 1378 u32 rx_id, int num_rxps)
1298{ 1379{
1299 int i; 1380 int i;
1300 int err; 1381 int err;
@@ -1338,7 +1419,7 @@ bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1338/* Allocates memory and interrupt resources for Tx object */ 1419/* Allocates memory and interrupt resources for Tx object */
1339static int 1420static int
1340bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, 1421bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1341 uint tx_id) 1422 u32 tx_id)
1342{ 1423{
1343 int i, err = 0; 1424 int i, err = 0;
1344 1425
@@ -1407,7 +1488,7 @@ bnad_ioc_timeout(unsigned long data)
1407 unsigned long flags; 1488 unsigned long flags;
1408 1489
1409 spin_lock_irqsave(&bnad->bna_lock, flags); 1490 spin_lock_irqsave(&bnad->bna_lock, flags);
1410 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc); 1491 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1411 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1492 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1412} 1493}
1413 1494
@@ -1418,7 +1499,7 @@ bnad_ioc_hb_check(unsigned long data)
1418 unsigned long flags; 1499 unsigned long flags;
1419 1500
1420 spin_lock_irqsave(&bnad->bna_lock, flags); 1501 spin_lock_irqsave(&bnad->bna_lock, flags);
1421 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc); 1502 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1422 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1503 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423} 1504}
1424 1505
@@ -1429,7 +1510,7 @@ bnad_iocpf_timeout(unsigned long data)
1429 unsigned long flags; 1510 unsigned long flags;
1430 1511
1431 spin_lock_irqsave(&bnad->bna_lock, flags); 1512 spin_lock_irqsave(&bnad->bna_lock, flags);
1432 bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc); 1513 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1433 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1514 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1434} 1515}
1435 1516
@@ -1440,7 +1521,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
1440 unsigned long flags; 1521 unsigned long flags;
1441 1522
1442 spin_lock_irqsave(&bnad->bna_lock, flags); 1523 spin_lock_irqsave(&bnad->bna_lock, flags);
1443 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc); 1524 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1444 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1525 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1445} 1526}
1446 1527
@@ -1499,7 +1580,7 @@ bnad_stats_timeout(unsigned long data)
1499 return; 1580 return;
1500 1581
1501 spin_lock_irqsave(&bnad->bna_lock, flags); 1582 spin_lock_irqsave(&bnad->bna_lock, flags);
1502 bna_stats_get(&bnad->bna); 1583 bna_hw_stats_get(&bnad->bna);
1503 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1584 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1504} 1585}
1505 1586
@@ -1632,7 +1713,7 @@ bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1632 1713
1633/* Should be held with conf_lock held */ 1714/* Should be held with conf_lock held */
1634void 1715void
1635bnad_cleanup_tx(struct bnad *bnad, uint tx_id) 1716bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1636{ 1717{
1637 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; 1718 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1638 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; 1719 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
@@ -1656,6 +1737,7 @@ bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1656 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1737 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1657 1738
1658 tx_info->tx = NULL; 1739 tx_info->tx = NULL;
1740 tx_info->tx_id = 0;
1659 1741
1660 if (0 == tx_id) 1742 if (0 == tx_id)
1661 tasklet_kill(&bnad->tx_free_tasklet); 1743 tasklet_kill(&bnad->tx_free_tasklet);
@@ -1665,7 +1747,7 @@ bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1665 1747
1666/* Should be held with conf_lock held */ 1748/* Should be held with conf_lock held */
1667int 1749int
1668bnad_setup_tx(struct bnad *bnad, uint tx_id) 1750bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1669{ 1751{
1670 int err; 1752 int err;
1671 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; 1753 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
@@ -1677,10 +1759,13 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
1677 struct bna_tx *tx; 1759 struct bna_tx *tx;
1678 unsigned long flags; 1760 unsigned long flags;
1679 1761
1762 tx_info->tx_id = tx_id;
1763
1680 /* Initialize the Tx object configuration */ 1764 /* Initialize the Tx object configuration */
1681 tx_config->num_txq = bnad->num_txq_per_tx; 1765 tx_config->num_txq = bnad->num_txq_per_tx;
1682 tx_config->txq_depth = bnad->txq_depth; 1766 tx_config->txq_depth = bnad->txq_depth;
1683 tx_config->tx_type = BNA_TX_T_REGULAR; 1767 tx_config->tx_type = BNA_TX_T_REGULAR;
1768 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1684 1769
1685 /* Initialize the tx event handlers */ 1770 /* Initialize the tx event handlers */
1686 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup; 1771 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
@@ -1741,14 +1826,15 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1741{ 1826{
1742 rx_config->rx_type = BNA_RX_T_REGULAR; 1827 rx_config->rx_type = BNA_RX_T_REGULAR;
1743 rx_config->num_paths = bnad->num_rxp_per_rx; 1828 rx_config->num_paths = bnad->num_rxp_per_rx;
1829 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1744 1830
1745 if (bnad->num_rxp_per_rx > 1) { 1831 if (bnad->num_rxp_per_rx > 1) {
1746 rx_config->rss_status = BNA_STATUS_T_ENABLED; 1832 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1747 rx_config->rss_config.hash_type = 1833 rx_config->rss_config.hash_type =
1748 (BFI_RSS_T_V4_TCP | 1834 (BFI_ENET_RSS_IPV6 |
1749 BFI_RSS_T_V6_TCP | 1835 BFI_ENET_RSS_IPV6_TCP |
1750 BFI_RSS_T_V4_IP | 1836 BFI_ENET_RSS_IPV4 |
1751 BFI_RSS_T_V6_IP); 1837 BFI_ENET_RSS_IPV4_TCP);
1752 rx_config->rss_config.hash_mask = 1838 rx_config->rss_config.hash_mask =
1753 bnad->num_rxp_per_rx - 1; 1839 bnad->num_rxp_per_rx - 1;
1754 get_random_bytes(rx_config->rss_config.toeplitz_hash_key, 1840 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
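
The RSS setup above programs hash_mask = num_rxp_per_rx - 1, which only distributes flows uniformly when the path count is a power of two. A sketch of the selection step:

#include <assert.h>
#include <stdint.h>

/* Low bits of the Toeplitz hash pick the path; the assert documents
 * the power-of-two requirement the mask implies. */
static unsigned rss_pick_path(uint32_t hash, unsigned num_paths)
{
	assert((num_paths & (num_paths - 1)) == 0);
	return hash & (num_paths - 1);
}
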
@@ -1768,7 +1854,7 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1768 1854
1769/* Called with mutex_lock(&bnad->conf_mutex) held */ 1855/* Called with mutex_lock(&bnad->conf_mutex) held */
1770void 1856void
1771bnad_cleanup_rx(struct bnad *bnad, uint rx_id) 1857bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1772{ 1858{
1773 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; 1859 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1774 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 1860 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
@@ -1811,7 +1897,7 @@ bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1811 1897
1812/* Called with mutex_lock(&bnad->conf_mutex) held */ 1898/* Called with mutex_lock(&bnad->conf_mutex) held */
1813int 1899int
1814bnad_setup_rx(struct bnad *bnad, uint rx_id) 1900bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1815{ 1901{
1816 int err; 1902 int err;
1817 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; 1903 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
@@ -1823,6 +1909,8 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
1823 struct bna_rx *rx; 1909 struct bna_rx *rx;
1824 unsigned long flags; 1910 unsigned long flags;
1825 1911
1912 rx_info->rx_id = rx_id;
1913
1826 /* Initialize the Rx object configuration */ 1914 /* Initialize the Rx object configuration */
1827 bnad_init_rx_config(bnad, rx_config); 1915 bnad_init_rx_config(bnad, rx_config);
1828 1916
@@ -1978,7 +2066,7 @@ bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
1978 u16 vid; 2066 u16 vid;
1979 unsigned long flags; 2067 unsigned long flags;
1980 2068
1981 BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1))); 2069 BUG_ON(!(VLAN_N_VID == BFI_ENET_VLAN_ID_MAX));
1982 2070
1983 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { 2071 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
1984 spin_lock_irqsave(&bnad->bna_lock, flags); 2072 spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -2031,11 +2119,11 @@ bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2031void 2119void
2032bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) 2120bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2033{ 2121{
2034 struct bfi_ll_stats_mac *mac_stats; 2122 struct bfi_enet_stats_mac *mac_stats;
2035 u64 bmap; 2123 u32 bmap;
2036 int i; 2124 int i;
2037 2125
2038 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats; 2126 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2039 stats->rx_errors = 2127 stats->rx_errors =
2040 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error + 2128 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2041 mac_stats->rx_frame_length_error + mac_stats->rx_code_error + 2129 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
@@ -2054,13 +2142,12 @@ bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2054 stats->rx_crc_errors = mac_stats->rx_fcs_error; 2142 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2055 stats->rx_frame_errors = mac_stats->rx_alignment_error; 2143 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2056 /* recv'r fifo overrun */ 2144 /* recv'r fifo overrun */
2057 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] | 2145 bmap = bna_rx_rid_mask(&bnad->bna);
2058 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32); 2146 for (i = 0; bmap; i++) {
2059 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2060 if (bmap & 1) { 2147 if (bmap & 1) {
2061 stats->rx_fifo_errors += 2148 stats->rx_fifo_errors +=
2062 bnad->stats.bna_stats-> 2149 bnad->stats.bna_stats->
2063 hw_stats->rxf_stats[i].frame_drops; 2150 hw_stats.rxf_stats[i].frame_drops;
2064 break; 2151 break;
2065 } 2152 }
2066 bmap >>= 1; 2153 bmap >>= 1;
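
The fifo-overrun accounting now walks a 32-bit rid mask bit by bit, stopping at the first set bit. The same walk as a standalone helper:

#include <stdint.h>

/* Test bit 0, shift right, stop at the first set bit (the driver
 * breaks out of its loop the same way). */
static int first_rid(uint32_t bmap)
{
	for (int i = 0; bmap; i++, bmap >>= 1)
		if (bmap & 1)
			return i;
	return -1;  /* mask empty: no rid in use */
}
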
@@ -2158,7 +2245,7 @@ bnad_q_num_init(struct bnad *bnad)
2158 * Called with bnad->bna_lock held because of cfg_flags access 2245
2159 */ 2246 */
2160static void 2247static void
2161bnad_q_num_adjust(struct bnad *bnad, int msix_vectors) 2248bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2162{ 2249{
2163 bnad->num_txq_per_tx = 1; 2250 bnad->num_txq_per_tx = 1;
2164 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + 2251 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
@@ -2171,76 +2258,72 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2171 bnad->num_rxp_per_rx = 1; 2258 bnad->num_rxp_per_rx = 1;
2172} 2259}
2173 2260
2174/* Enable / disable device */ 2261/* Enable / disable ioceth */
2175static void 2262static int
2176bnad_device_disable(struct bnad *bnad) 2263bnad_ioceth_disable(struct bnad *bnad)
2177{ 2264{
2178 unsigned long flags; 2265 unsigned long flags;
2179 2266 int err = 0;
2180 init_completion(&bnad->bnad_completions.ioc_comp);
2181 2267
2182 spin_lock_irqsave(&bnad->bna_lock, flags); 2268 spin_lock_irqsave(&bnad->bna_lock, flags);
2183 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP); 2269 init_completion(&bnad->bnad_completions.ioc_comp);
2270 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2184 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2271 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2185 2272
2186 wait_for_completion(&bnad->bnad_completions.ioc_comp); 2273 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2274 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2275
2276 err = bnad->bnad_completions.ioc_comp_status;
2277 return err;
2187} 2278}
2188 2279
2189static int 2280static int
2190bnad_device_enable(struct bnad *bnad) 2281bnad_ioceth_enable(struct bnad *bnad)
2191{ 2282{
2192 int err = 0; 2283 int err = 0;
2193 unsigned long flags; 2284 unsigned long flags;
2194 2285
2195 init_completion(&bnad->bnad_completions.ioc_comp);
2196
2197 spin_lock_irqsave(&bnad->bna_lock, flags); 2286 spin_lock_irqsave(&bnad->bna_lock, flags);
2198 bna_device_enable(&bnad->bna.device); 2287 init_completion(&bnad->bnad_completions.ioc_comp);
2288 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2289 bna_ioceth_enable(&bnad->bna.ioceth);
2199 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2290 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2200 2291
2201 wait_for_completion(&bnad->bnad_completions.ioc_comp); 2292 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2293 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2202 2294
2203 if (bnad->bnad_completions.ioc_comp_status) 2295 err = bnad->bnad_completions.ioc_comp_status;
2204 err = bnad->bnad_completions.ioc_comp_status;
2205 2296
2206 return err; 2297 return err;
2207} 2298}
2208 2299
2209/* Free BNA resources */ 2300/* Free BNA resources */
2210static void 2301static void
2211bnad_res_free(struct bnad *bnad) 2302bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2303 u32 res_val_max)
2212{ 2304{
2213 int i; 2305 int i;
2214 struct bna_res_info *res_info = &bnad->res_info[0];
2215 2306
2216 for (i = 0; i < BNA_RES_T_MAX; i++) { 2307 for (i = 0; i < res_val_max; i++)
2217 if (res_info[i].res_type == BNA_RES_T_MEM) 2308 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2218 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2219 else
2220 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2221 }
2222} 2309}
2223 2310
2224/* Allocates memory and interrupt resources for BNA */ 2311/* Allocates memory and interrupt resources for BNA */
2225static int 2312static int
2226bnad_res_alloc(struct bnad *bnad) 2313bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2314 u32 res_val_max)
2227{ 2315{
2228 int i, err; 2316 int i, err;
2229 struct bna_res_info *res_info = &bnad->res_info[0];
2230 2317
2231 for (i = 0; i < BNA_RES_T_MAX; i++) { 2318 for (i = 0; i < res_val_max; i++) {
2232 if (res_info[i].res_type == BNA_RES_T_MEM) 2319 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2233 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2234 else
2235 err = bnad_mbox_irq_alloc(bnad,
2236 &res_info[i].res_u.intr_info);
2237 if (err) 2320 if (err)
2238 goto err_return; 2321 goto err_return;
2239 } 2322 }
2240 return 0; 2323 return 0;
2241 2324
2242err_return: 2325err_return:
2243 bnad_res_free(bnad); 2326 bnad_res_free(bnad, res_info, res_val_max);
2244 return err; 2327 return err;
2245} 2328}
2246 2329
@@ -2276,7 +2359,7 @@ bnad_enable_msix(struct bnad *bnad)
2276 2359
2277 spin_lock_irqsave(&bnad->bna_lock, flags); 2360 spin_lock_irqsave(&bnad->bna_lock, flags);
2278 /* ret = #of vectors that we got */ 2361 /* ret = #of vectors that we got */
2279 bnad_q_num_adjust(bnad, ret); 2362 bnad_q_num_adjust(bnad, ret, 0);
2280 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2363 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2281 2364
2282 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) 2365 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
@@ -2284,6 +2367,9 @@ bnad_enable_msix(struct bnad *bnad)
2284 * bnad->num_rxp_per_rx) + 2367 * bnad->num_rxp_per_rx) +
2285 BNAD_MAILBOX_MSIX_VECTORS; 2368 BNAD_MAILBOX_MSIX_VECTORS;
2286 2369
2370 if (bnad->msix_num > ret)
2371 goto intx_mode;
2372
2287 /* Try once more with adjusted numbers */ 2373 /* Try once more with adjusted numbers */
2288 /* If this fails, fall back to INTx */ 2374 /* If this fails, fall back to INTx */
2289 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, 2375 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
@@ -2293,6 +2379,9 @@ bnad_enable_msix(struct bnad *bnad)
2293 2379
2294 } else if (ret < 0) 2380 } else if (ret < 0)
2295 goto intx_mode; 2381 goto intx_mode;
2382
2383 pci_intx(bnad->pcidev, 0);
2384
2296 return; 2385 return;
2297 2386
2298intx_mode: 2387intx_mode:
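
The MSI-X path gains two things here: a guard for the case where even the adjusted vector count exceeds what the first pci_enable_msix() call offered, and a pci_intx(pdev, 0) once MSI-X sticks. The control flow, reduced to a sketch with hypothetical helpers; try_enable_msix() mimics the era's pci_enable_msix() contract (0 = success, >0 = only that many vectors available, <0 = hard failure):

static int  try_enable_msix(int nvec)  { (void)nvec; return 0; }
static int  adjust_queue_counts(int n) { return n; }   /* shrink tx/rx */
static void disable_legacy_intx(void)  { }             /* pci_intx(pdev, 0) */
static int  use_intx_mode(void)        { return 0; }

static int setup_interrupts(int wanted)
{
	int ret = try_enable_msix(wanted);

	if (ret > 0) {
		int adjusted = adjust_queue_counts(ret);
		if (adjusted > ret)
			goto intx;      /* adjusted demand still too high */
		if (try_enable_msix(adjusted))
			goto intx;
	} else if (ret < 0) {
		goto intx;
	}
	disable_legacy_intx();          /* MSI-X is live; mask legacy INTx */
	return 0;
intx:
	return use_intx_mode();
}
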
@@ -2351,12 +2440,12 @@ bnad_open(struct net_device *netdev)
2351 pause_config.tx_pause = 0; 2440 pause_config.tx_pause = 0;
2352 pause_config.rx_pause = 0; 2441 pause_config.rx_pause = 0;
2353 2442
2354 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN; 2443 mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2355 2444
2356 spin_lock_irqsave(&bnad->bna_lock, flags); 2445 spin_lock_irqsave(&bnad->bna_lock, flags);
2357 bna_port_mtu_set(&bnad->bna.port, mtu, NULL); 2446 bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2358 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL); 2447 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2359 bna_port_enable(&bnad->bna.port); 2448 bna_enet_enable(&bnad->bna.enet);
2360 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2449 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2361 2450
2362 /* Enable broadcast */ 2451 /* Enable broadcast */
@@ -2396,14 +2485,14 @@ bnad_stop(struct net_device *netdev)
2396 /* Stop the stats timer */ 2485 /* Stop the stats timer */
2397 bnad_stats_timer_stop(bnad); 2486 bnad_stats_timer_stop(bnad);
2398 2487
2399 init_completion(&bnad->bnad_completions.port_comp); 2488 init_completion(&bnad->bnad_completions.enet_comp);
2400 2489
2401 spin_lock_irqsave(&bnad->bna_lock, flags); 2490 spin_lock_irqsave(&bnad->bna_lock, flags);
2402 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP, 2491 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2403 bnad_cb_port_disabled); 2492 bnad_cb_enet_disabled);
2404 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2493 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2405 2494
2406 wait_for_completion(&bnad->bnad_completions.port_comp); 2495 wait_for_completion(&bnad->bnad_completions.enet_comp);
2407 2496
2408 bnad_cleanup_tx(bnad, 0); 2497 bnad_cleanup_tx(bnad, 0);
2409 bnad_cleanup_rx(bnad, 0); 2498 bnad_cleanup_rx(bnad, 0);
@@ -2425,19 +2514,18 @@ static netdev_tx_t
2425bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) 2514bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2426{ 2515{
2427 struct bnad *bnad = netdev_priv(netdev); 2516 struct bnad *bnad = netdev_priv(netdev);
2517 u32 txq_id = 0;
2518 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2428 2519
2429 u16 txq_prod, vlan_tag = 0; 2520 u16 txq_prod, vlan_tag = 0;
2430 u32 unmap_prod, wis, wis_used, wi_range; 2521 u32 unmap_prod, wis, wis_used, wi_range;
2431 u32 vectors, vect_id, i, acked; 2522 u32 vectors, vect_id, i, acked;
2432 u32 tx_id;
2433 int err; 2523 int err;
2434 2524
2435 struct bnad_tx_info *tx_info; 2525 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2436 struct bna_tcb *tcb;
2437 struct bnad_unmap_q *unmap_q;
2438 dma_addr_t dma_addr; 2526 dma_addr_t dma_addr;
2439 struct bna_txq_entry *txqent; 2527 struct bna_txq_entry *txqent;
2440 bna_txq_wi_ctrl_flag_t flags; 2528 u16 flags;
2441 2529
2442 if (unlikely 2530 if (unlikely
2443 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) { 2531 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
@@ -2445,15 +2533,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2445 return NETDEV_TX_OK; 2533 return NETDEV_TX_OK;
2446 } 2534 }
2447 2535
2448 tx_id = 0;
2449
2450 tx_info = &bnad->tx_info[tx_id];
2451 tcb = tx_info->tcb[tx_id];
2452 unmap_q = tcb->unmap_q;
2453
2454 /* 2536 /*
2455 * Takes care of the Tx that is scheduled between clearing the flag 2537 * Takes care of the Tx that is scheduled between clearing the flag
2456 * and the netif_stop_queue() call. 2538 * and the netif_tx_stop_all_queues() call.
2457 */ 2539 */
2458 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { 2540 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2459 dev_kfree_skb(skb); 2541 dev_kfree_skb(skb);
@@ -2467,9 +2549,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2467 } 2549 }
2468 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ 2550 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2469 acked = 0; 2551 acked = 0;
2470 if (unlikely 2552 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2471 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || 2553 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2472 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2473 if ((u16) (*tcb->hw_consumer_index) != 2554 if ((u16) (*tcb->hw_consumer_index) !=
2474 tcb->consumer_index && 2555 tcb->consumer_index &&
2475 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { 2556 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
@@ -2602,7 +2683,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2602 2683
2603 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2684 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2604 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 2685 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2605 u32 size = frag->size; 2686 u16 size = frag->size;
2606 2687
2607 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) { 2688 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2608 vect_id = 0; 2689 vect_id = 0;
@@ -2760,11 +2841,25 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2760} 2841}
2761 2842
2762static int 2843static int
2763bnad_change_mtu(struct net_device *netdev, int new_mtu) 2844bnad_mtu_set(struct bnad *bnad, int mtu)
2764{ 2845{
2765 int mtu, err = 0;
2766 unsigned long flags; 2846 unsigned long flags;
2767 2847
2848 init_completion(&bnad->bnad_completions.mtu_comp);
2849
2850 spin_lock_irqsave(&bnad->bna_lock, flags);
2851 bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2852 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2853
2854 wait_for_completion(&bnad->bnad_completions.mtu_comp);
2855
2856 return bnad->bnad_completions.mtu_comp_status;
2857}
2858
2859static int
2860bnad_change_mtu(struct net_device *netdev, int new_mtu)
2861{
2862 int err, mtu = netdev->mtu;
2768 struct bnad *bnad = netdev_priv(netdev); 2863 struct bnad *bnad = netdev_priv(netdev);
2769 2864
2770 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) 2865 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
@@ -2774,11 +2869,10 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
2774 2869
2775 netdev->mtu = new_mtu; 2870 netdev->mtu = new_mtu;
2776 2871
2777 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN; 2872 mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2778 2873 err = bnad_mtu_set(bnad, mtu);
2779 spin_lock_irqsave(&bnad->bna_lock, flags); 2874 if (err)
2780 bna_port_mtu_set(&bnad->bna.port, mtu, NULL); 2875 err = -EBUSY;
2781 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2782 2876
2783 mutex_unlock(&bnad->conf_mutex); 2877 mutex_unlock(&bnad->conf_mutex);
2784 return err; 2878 return err;
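
Folding VLAN_HLEN into the value handed to the hardware means that for the default 1500-byte MTU the frame budget becomes ETH_HLEN (14) + VLAN_HLEN (4) + 1500 + ETH_FCS_LEN (4) = 1522 bytes, so a maximum-size tagged frame is no longer clipped. The new bnad_mtu_set() wrapper also turns the previously fire-and-forget bna_enet_mtu_set() into a synchronous call: it blocks on the mtu_comp completion posted by bnad_cb_enet_mtu_set() and propagates the status back as -EBUSY on failure.
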
@@ -2968,7 +3062,7 @@ bnad_uninit(struct bnad *bnad)
2968 3062
2969/* 3063/*
2970 * Initialize locks 3064 * Initialize locks
2971 a) Per device mutex used for serializing configuration 3065 a) Per ioceth mutex used for serializing configuration
2972 changes from OS interface 3066 changes from OS interface
2973 b) spin lock used to protect bna state machine 3067 b) spin lock used to protect bna state machine
2974 */ 3068 */
@@ -3058,12 +3152,15 @@ bnad_pci_probe(struct pci_dev *pdev,
3058 */ 3152 */
3059 netdev = alloc_etherdev(sizeof(struct bnad)); 3153 netdev = alloc_etherdev(sizeof(struct bnad));
3060 if (!netdev) { 3154 if (!netdev) {
3061 dev_err(&pdev->dev, "alloc_etherdev failed\n"); 3155 dev_err(&pdev->dev, "netdev allocation failed\n");
3062 err = -ENOMEM; 3156 err = -ENOMEM;
3063 return err; 3157 return err;
3064 } 3158 }
3065 bnad = netdev_priv(netdev); 3159 bnad = netdev_priv(netdev);
3066 3160
3161 bnad_lock_init(bnad);
3162
3163 mutex_lock(&bnad->conf_mutex);
3067 /* 3164 /*
3068 * PCI initialization 3165 * PCI initialization
3069 * Output : using_dac = 1 for 64 bit DMA 3166 * Output : using_dac = 1 for 64 bit DMA
@@ -3073,7 +3170,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3073 if (err) 3170 if (err)
3074 goto free_netdev; 3171 goto free_netdev;
3075 3172
3076 bnad_lock_init(bnad);
3077 /* 3173 /*
3078 * Initialize bnad structure 3174 * Initialize bnad structure
3079 * Setup relation between pci_dev & netdev 3175 * Setup relation between pci_dev & netdev
@@ -3082,21 +3178,22 @@ bnad_pci_probe(struct pci_dev *pdev,
3082 err = bnad_init(bnad, pdev, netdev); 3178 err = bnad_init(bnad, pdev, netdev);
3083 if (err) 3179 if (err)
3084 goto pci_uninit; 3180 goto pci_uninit;
3181
3085 /* Initialize netdev structure, set up ethtool ops */ 3182 /* Initialize netdev structure, set up ethtool ops */
3086 bnad_netdev_init(bnad, using_dac); 3183 bnad_netdev_init(bnad, using_dac);
3087 3184
3088 /* Set link to down state */ 3185 /* Set link to down state */
3089 netif_carrier_off(netdev); 3186 netif_carrier_off(netdev);
3090 3187
3091 bnad_enable_msix(bnad);
3092
3093 /* Get resource requirement from bna */ 3188
3189 spin_lock_irqsave(&bnad->bna_lock, flags);
3094 bna_res_req(&bnad->res_info[0]); 3190 bna_res_req(&bnad->res_info[0]);
3191 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3095 3192
3096 /* Allocate resources from bna */ 3193 /* Allocate resources from bna */
3097 err = bnad_res_alloc(bnad); 3194 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3098 if (err) 3195 if (err)
3099 goto free_netdev; 3196 goto drv_uninit;
3100 3197
3101 bna = &bnad->bna; 3198 bna = &bnad->bna;
3102 3199
@@ -3106,69 +3203,102 @@ bnad_pci_probe(struct pci_dev *pdev,
3106 pcidev_info.device_id = bnad->pcidev->device; 3203 pcidev_info.device_id = bnad->pcidev->device;
3107 pcidev_info.pci_bar_kva = bnad->bar0; 3204 pcidev_info.pci_bar_kva = bnad->bar0;
3108 3205
3109 mutex_lock(&bnad->conf_mutex);
3110
3111 spin_lock_irqsave(&bnad->bna_lock, flags); 3206 spin_lock_irqsave(&bnad->bna_lock, flags);
3112 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); 3207 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3113 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3208 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3114 3209
3115 bnad->stats.bna_stats = &bna->stats; 3210 bnad->stats.bna_stats = &bna->stats;
3116 3211
3212 bnad_enable_msix(bnad);
3213 err = bnad_mbox_irq_alloc(bnad);
3214 if (err)
3215 goto res_free;
3216
3217
3117 /* Set up timers */ 3218 /* Set up timers */
3118 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout, 3219 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3119 ((unsigned long)bnad)); 3220 ((unsigned long)bnad));
3120 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check, 3221 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3121 ((unsigned long)bnad)); 3222 ((unsigned long)bnad));
3122 setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout, 3223 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3123 ((unsigned long)bnad)); 3224 ((unsigned long)bnad));
3124 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout, 3225 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3125 ((unsigned long)bnad)); 3226 ((unsigned long)bnad));
3126 3227
3127 /* Now start the timer before calling IOC */ 3228 /* Now start the timer before calling IOC */
3128 mod_timer(&bnad->bna.device.ioc.iocpf_timer, 3229 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3129 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); 3230 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3130 3231
3131 /* 3232 /*
3132 * Start the chip 3233 * Start the chip
3133 * Don't care even if err != 0, bna state machine will 3234 * If the call back comes with error, we bail out.
3134 * deal with it 3235 * This is a catastrophic error.
3135 */ 3236 */
3136 err = bnad_device_enable(bnad); 3237 err = bnad_ioceth_enable(bnad);
3238 if (err) {
3239 pr_err("BNA: Initialization failed err=%d\n",
3240 err);
3241 goto probe_success;
3242 }
3243
3244 spin_lock_irqsave(&bnad->bna_lock, flags);
3245 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3246 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3247 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3248 bna_attr(bna)->num_rxp - 1);
3249 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3250 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3251 err = -EIO;
3252 }
3253 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3254 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3255
3256 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3257 if (err)
3258 goto disable_ioceth;
3259
3260 spin_lock_irqsave(&bnad->bna_lock, flags);
3261 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3262 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3137 3263
3138 /* Get the burnt-in mac */ 3264 /* Get the burnt-in mac */
3139 spin_lock_irqsave(&bnad->bna_lock, flags); 3265 spin_lock_irqsave(&bnad->bna_lock, flags);
3140 bna_port_mac_get(&bna->port, &bnad->perm_addr); 3266 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3141 bnad_set_netdev_perm_addr(bnad); 3267 bnad_set_netdev_perm_addr(bnad);
3142 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3268 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3143 3269
3144 mutex_unlock(&bnad->conf_mutex);
3145
3146 /* Finally, register with net_device layer */ 3270 /* Finally, register with net_device layer */
3147 err = register_netdev(netdev); 3271 err = register_netdev(netdev);
3148 if (err) { 3272 if (err) {
3149 pr_err("BNA : Registering with netdev failed\n"); 3273 pr_err("BNA : Registering with netdev failed\n");
3150 goto disable_device; 3274 goto probe_uninit;
3151 } 3275 }
3276 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3152 3277
3278probe_success:
3279 mutex_unlock(&bnad->conf_mutex);
3153 return 0; 3280 return 0;
3154 3281
3155disable_device: 3282probe_uninit:
3156 mutex_lock(&bnad->conf_mutex); 3283 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3157 bnad_device_disable(bnad); 3284disable_ioceth:
3158 del_timer_sync(&bnad->bna.device.ioc.ioc_timer); 3285 bnad_ioceth_disable(bnad);
3159 del_timer_sync(&bnad->bna.device.ioc.sem_timer); 3286 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3160 del_timer_sync(&bnad->bna.device.ioc.hb_timer); 3287 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3288 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3161 spin_lock_irqsave(&bnad->bna_lock, flags); 3289 spin_lock_irqsave(&bnad->bna_lock, flags);
3162 bna_uninit(bna); 3290 bna_uninit(bna);
3163 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3291 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3164 mutex_unlock(&bnad->conf_mutex); 3292 bnad_mbox_irq_free(bnad);
3165
3166 bnad_res_free(bnad);
3167 bnad_disable_msix(bnad); 3293 bnad_disable_msix(bnad);
3294res_free:
3295 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3296drv_uninit:
3297 bnad_uninit(bnad);
3168pci_uninit: 3298pci_uninit:
3169 bnad_pci_uninit(pdev); 3299 bnad_pci_uninit(pdev);
3300 mutex_unlock(&bnad->conf_mutex);
3170 bnad_lock_uninit(bnad); 3301 bnad_lock_uninit(bnad);
3171 bnad_uninit(bnad);
3172free_netdev: 3302free_netdev:
3173 free_netdev(netdev); 3303 free_netdev(netdev);
3174 return err; 3304 return err;
@@ -3189,21 +3319,24 @@ bnad_pci_remove(struct pci_dev *pdev)
3189 bnad = netdev_priv(netdev); 3319 bnad = netdev_priv(netdev);
3190 bna = &bnad->bna; 3320 bna = &bnad->bna;
3191 3321
3192 unregister_netdev(netdev); 3322 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3323 unregister_netdev(netdev);
3193 3324
3194 mutex_lock(&bnad->conf_mutex); 3325 mutex_lock(&bnad->conf_mutex);
3195 bnad_device_disable(bnad); 3326 bnad_ioceth_disable(bnad);
3196 del_timer_sync(&bnad->bna.device.ioc.ioc_timer); 3327 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3197 del_timer_sync(&bnad->bna.device.ioc.sem_timer); 3328 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3198 del_timer_sync(&bnad->bna.device.ioc.hb_timer); 3329 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3199 spin_lock_irqsave(&bnad->bna_lock, flags); 3330 spin_lock_irqsave(&bnad->bna_lock, flags);
3200 bna_uninit(bna); 3331 bna_uninit(bna);
3201 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3332 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3202 mutex_unlock(&bnad->conf_mutex);
3203 3333
3204 bnad_res_free(bnad); 3334 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3335 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3336 bnad_mbox_irq_free(bnad);
3205 bnad_disable_msix(bnad); 3337 bnad_disable_msix(bnad);
3206 bnad_pci_uninit(pdev); 3338 bnad_pci_uninit(pdev);
3339 mutex_unlock(&bnad->conf_mutex);
3207 bnad_lock_uninit(bnad); 3340 bnad_lock_uninit(bnad);
3208 bnad_uninit(bnad); 3341 bnad_uninit(bnad);
3209 free_netdev(netdev); 3342 free_netdev(netdev);
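
The probe path above is reorganized into the usual goto-unwind ladder: each failure jumps to a label that releases everything acquired so far, in reverse order, and the remove path mirrors the same ordering. The skeleton of that idiom, with placeholder stubs rather than driver functions:

static int  alloc_a(void) { return 0; }
static int  alloc_b(void) { return 0; }
static int  alloc_c(void) { return 0; }
static void free_b(void)  { }
static void free_a(void)  { }

static int probe(void)
{
	if (alloc_a())
		return -1;
	if (alloc_b())
		goto undo_a;
	if (alloc_c())
		goto undo_b;
	return 0;       /* everything held; teardown mirrors this order */

undo_b:
	free_b();
undo_a:
	free_a();
	return -1;
}
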
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 458eb30371b5..a538cf4383b1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -44,6 +44,7 @@
44 44
45#define BNAD_MAX_RXS 1 45#define BNAD_MAX_RXS 1
46#define BNAD_MAX_RXPS_PER_RX 16 46#define BNAD_MAX_RXPS_PER_RX 16
47#define BNAD_MAX_RXQ_PER_RXP 2
47 48
48/* 49/*
49 * Control structure pointed to ccb->ctrl, which 50 * Control structure pointed to ccb->ctrl, which
@@ -76,6 +77,8 @@ struct bnad_rx_ctrl {
76#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */ 77#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
77#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */ 78#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
78 79
80#define BNAD_IOCETH_TIMEOUT 10000
81
79#define BNAD_MAX_Q_DEPTH 0x10000 82#define BNAD_MAX_Q_DEPTH 0x10000
80#define BNAD_MIN_Q_DEPTH 0x200 83#define BNAD_MIN_Q_DEPTH 0x200
81 84
@@ -93,6 +96,10 @@ struct bnad_rx_ctrl {
93#define BNAD_RXQ_REFILL 0 96#define BNAD_RXQ_REFILL 0
94#define BNAD_RXQ_STARTED 1 97#define BNAD_RXQ_STARTED 1
95 98
99/* Resource limits */
100#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
101#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx)
102
96/* 103/*
97 * DATA STRUCTURES 104 * DATA STRUCTURES
98 */ 105 */
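
These two macros size the MSI-X request together with the mailbox vector: msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + BNAD_MAILBOX_MSIX_VECTORS. For example, one Tx object with one TxQ plus one Rx object with two RxPs asks for 1 + 2 + 1 = 4 vectors; the actual counts depend on module defaults, so the figures here are illustrative only.
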
@@ -115,7 +122,8 @@ struct bnad_completion {
115 struct completion tx_comp; 122 struct completion tx_comp;
116 struct completion rx_comp; 123 struct completion rx_comp;
117 struct completion stats_comp; 124 struct completion stats_comp;
118 struct completion port_comp; 125 struct completion enet_comp;
126 struct completion mtu_comp;
119 127
120 u8 ioc_comp_status; 128 u8 ioc_comp_status;
121 u8 ucast_comp_status; 129 u8 ucast_comp_status;
@@ -124,6 +132,7 @@ struct bnad_completion {
124 u8 rx_comp_status; 132 u8 rx_comp_status;
125 u8 stats_comp_status; 133 u8 stats_comp_status;
126 u8 port_comp_status; 134 u8 port_comp_status;
135 u8 mtu_comp_status;
127}; 136};
128 137
129/* Tx Rx Control Stats */ 138/* Tx Rx Control Stats */
@@ -145,6 +154,7 @@ struct bnad_drv_stats {
145 u64 netif_rx_dropped; 154 u64 netif_rx_dropped;
146 155
147 u64 link_toggle; 156 u64 link_toggle;
157 u64 cee_toggle;
148 u64 cee_up; 158 u64 cee_up;
149 159
150 u64 rxp_info_alloc_failed; 160 u64 rxp_info_alloc_failed;
@@ -174,12 +184,14 @@ struct bnad_rx_res_info {
 struct bnad_tx_info {
 	struct bna_tx *tx; /* 1:1 between tx_info & tx */
 	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
+	u32 tx_id;
 } ____cacheline_aligned;
 
 struct bnad_rx_info {
 	struct bna_rx *rx; /* 1:1 between rx_info & rx */
 
 	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
+	u32 rx_id;
 } ____cacheline_aligned;
 
 /* Unmap queues for Tx / Rx cleanup */
@@ -205,13 +217,18 @@ struct bnad_unmap_q {
 /* Defines for run_flags bit-mask */
 /* Set, tested & cleared using xxx_bit() functions */
 /* Values indicated bit positions */
-#define BNAD_RF_CEE_RUNNING		1
+#define BNAD_RF_CEE_RUNNING		0
+#define BNAD_RF_MTU_SET			1
 #define BNAD_RF_MBOX_IRQ_DISABLED	2
-#define BNAD_RF_RX_STARTED		3
+#define BNAD_RF_NETDEV_REGISTERED	3
 #define BNAD_RF_DIM_TIMER_RUNNING	4
 #define BNAD_RF_STATS_TIMER_RUNNING	5
-#define BNAD_RF_TX_SHUTDOWN_DELAYED	6
-#define BNAD_RF_RX_SHUTDOWN_DELAYED	7
+#define BNAD_RF_TX_PRIO_SET		6
+
+
+/* Define for Fast Path flags */
+/* Defined as bit positions */
+#define BNAD_FP_IN_RX_PATH	0
 
 struct bnad {
 	struct net_device *netdev;
@@ -265,6 +282,7 @@ struct bnad {
 
 	/* Control path resources, memory & irq */
 	struct bna_res_info res_info[BNA_RES_T_MAX];
+	struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
 	struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
 	struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
 
@@ -302,10 +320,10 @@ extern void bnad_set_ethtool_ops(struct net_device *netdev);
 extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
 extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
 
-extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
-extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
-extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
+extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id);
 
 /* Timer start/stop protos */
 extern void bnad_dim_timer_start(struct bnad *bnad);
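
The reworked run_flags above replace the Rx/Tx shutdown-delay bits, and the new fast-path flag BNAD_FP_IN_RX_PATH backs the quiescing handshake described in the change notes: the Rx poll routine sets a bit, and the cleanup routine waits for it to clear before tearing the queue down. A minimal sketch of that handshake, assuming an illustrative rx-control structure with a flags word (the patch's actual field layout is outside this hunk):

#include <linux/bitops.h>
#include <asm/processor.h>	/* cpu_relax() */

/* Sketch only: "struct rx_ctrl_sketch" and its "fp_flags" member are
 * stand-ins for the driver's real Rx control structure.
 */
struct rx_ctrl_sketch {
	unsigned long fp_flags;
};

static void rx_poll_sketch(struct rx_ctrl_sketch *rc)
{
	set_bit(BNAD_FP_IN_RX_PATH, &rc->fp_flags);	/* poll in progress */
	/* ... process Rx completions up to the NAPI budget ... */
	clear_bit(BNAD_FP_IN_RX_PATH, &rc->fp_flags);	/* poll finished */
}

static void rx_cleanup_sketch(struct rx_ctrl_sketch *rc)
{
	/* Teardown must not free the queue while a poll is still inside it. */
	while (test_bit(BNAD_FP_IN_RX_PATH, &rc->fp_flags))
		cpu_relax();
}
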
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 49174f87f4d1..1c19dcea83c2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -29,14 +29,14 @@
 
 #define BNAD_NUM_TXF_COUNTERS 12
 #define BNAD_NUM_RXF_COUNTERS 10
-#define BNAD_NUM_CQ_COUNTERS 3
+#define BNAD_NUM_CQ_COUNTERS (3 + 5)
 #define BNAD_NUM_RXQ_COUNTERS 6
 #define BNAD_NUM_TXQ_COUNTERS 5
 
 #define BNAD_ETHTOOL_STATS_NUM						\
 	(sizeof(struct rtnl_link_stats64) / sizeof(u64) +	\
 	sizeof(struct bnad_drv_stats) / sizeof(u64) +		\
-	offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
+	offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
 
 static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
 	"rx_packets",
@@ -277,7 +277,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
 	if (ioc_attr) {
 		spin_lock_irqsave(&bnad->bna_lock, flags);
-		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
@@ -462,8 +462,8 @@ bnad_get_pauseparam(struct net_device *netdev,
 	struct bnad *bnad = netdev_priv(netdev);
 
 	pauseparam->autoneg = 0;
-	pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
-	pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
+	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
+	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
 }
 
 static int
@@ -478,12 +478,12 @@ bnad_set_pauseparam(struct net_device *netdev,
 		return -EINVAL;
 
 	mutex_lock(&bnad->conf_mutex);
-	if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
-	    pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
+	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
+	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
 		pause_config.rx_pause = pauseparam->rx_pause;
 		pause_config.tx_pause = pauseparam->tx_pause;
 		spin_lock_irqsave(&bnad->bna_lock, flags);
-		bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
+		bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 	}
 	mutex_unlock(&bnad->conf_mutex);
@@ -495,7 +495,7 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
 {
 	struct bnad *bnad = netdev_priv(netdev);
 	int i, j, q_num;
-	u64 bmap;
+	u32 bmap;
 
 	mutex_lock(&bnad->conf_mutex);
 
@@ -508,9 +508,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
 				  ETH_GSTRING_LEN);
 			string += ETH_GSTRING_LEN;
 		}
-	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
-			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+	bmap = bna_tx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1) {
 			sprintf(string, "txf%d_ucast_octets", i);
 			string += ETH_GSTRING_LEN;
@@ -540,9 +539,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
 		bmap >>= 1;
 	}
 
-	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
-			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+	bmap = bna_rx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1) {
 			sprintf(string, "rxf%d_ucast_octets", i);
 			string += ETH_GSTRING_LEN;
@@ -663,18 +661,16 @@ bnad_get_stats_count_locked(struct net_device *netdev)
 {
 	struct bnad *bnad = netdev_priv(netdev);
 	int i, j, count, rxf_active_num = 0, txf_active_num = 0;
-	u64 bmap;
+	u32 bmap;
 
-	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
-			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+	bmap = bna_tx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1)
 			txf_active_num++;
 		bmap >>= 1;
 	}
-	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
-			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+	bmap = bna_rx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1)
 			rxf_active_num++;
 		bmap >>= 1;
@@ -787,7 +783,7 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
 	unsigned long flags;
 	struct rtnl_link_stats64 *net_stats64;
 	u64 *stats64;
-	u64 bmap;
+	u32 bmap;
 
 	mutex_lock(&bnad->conf_mutex);
 	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
@@ -818,20 +814,20 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
 		buf[bi++] = stats64[i];
 
 	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
-	stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
+	stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
 	for (i = 0;
-	    i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
+	    i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
+		sizeof(u64);
 	    i++)
 		buf[bi++] = stats64[i];
 
 	/* Fill txf stats into ethtool buffers */
-	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
-			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
+	bmap = bna_tx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1) {
 			stats64 = (u64 *)&bnad->stats.bna_stats->
-						hw_stats->txf_stats[i];
-			for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
+						hw_stats.txf_stats[i];
+			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
 				sizeof(u64); j++)
 				buf[bi++] = stats64[j];
 		}
@@ -839,13 +835,12 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
 	}
 
 	/* Fill rxf stats into ethtool buffers */
-	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
-			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
-	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+	bmap = bna_rx_rid_mask(&bnad->bna);
+	for (i = 0; bmap; i++) {
 		if (bmap & 1) {
 			stats64 = (u64 *)&bnad->stats.bna_stats->
-						hw_stats->rxf_stats[i];
-			for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
+						hw_stats.rxf_stats[i];
+			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
 				sizeof(u64); j++)
 				buf[bi++] = stats64[j];
 		}
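
Every loop above now walks a 32-bit resource-id mask the same way: test bit 0, act on index i, shift right until the mask empties. A standalone illustration of the idiom in plain C (the mask value is made up; bna_tx_rid_mask()/bna_rx_rid_mask() are the driver's accessors and are defined elsewhere in the bna headers):

#include <stdint.h>
#include <stdio.h>

/* Count set bits the way the ethtool code does: the loop index i tracks
 * the function id, and the mask is consumed one bit at a time.
 */
static int count_active(uint32_t bmap)
{
	int i, active = 0;

	for (i = 0; bmap; i++) {
		if (bmap & 1)
			active++;	/* function "i" is active */
		bmap >>= 1;
	}
	return active;
}

int main(void)
{
	/* 0x0b has bits 0, 1 and 3 set -> three active functions */
	printf("%d\n", count_active(0x0bu));
	return 0;
}
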
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index a679e038747b..50fce15feacc 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -40,7 +40,7 @@
 
 extern char bfa_version[];
 
-#define CNA_FW_FILE_CT	"ctfw_cna.bin"
+#define CNA_FW_FILE_CT	"ctfw.bin"
 #define FC_SYMNAME_MAX	256	/*!< max name server symbolic name size */
 
 #pragma pack(1)
@@ -77,4 +77,33 @@ typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
 	}							\
 }
 
+/*
+ * bfa_q_deq_tail - dequeue an element from tail of the queue
+ */
+#define bfa_q_deq_tail(_q, _qe) {					\
+	if (!list_empty(_q)) {						\
+		*((struct list_head **) (_qe)) = bfa_q_prev(_q);	\
+		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
+						(struct list_head *) (_q); \
+		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
+		bfa_q_qe_init(*((struct list_head **) _qe));		\
+	} else {							\
+		*((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+	}								\
+}
+
+/*
+ * bfa_q_enq_head - enqueue an element at the head of queue
+ */
+#define bfa_q_enq_head(_q, _qe) {					\
+	if (!(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL))	\
+		pr_err("Assertion failure: %s:%d: %d",			\
+			__FILE__, __LINE__,				\
+		(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
+	bfa_q_next(_qe) = bfa_q_next(_q);				\
+	bfa_q_prev(_qe) = (struct list_head *) (_q);			\
+	bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe);	\
+	bfa_q_next(_q) = (struct list_head *) (_qe);			\
+}
+
 #endif /* __CNA_H__ */
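
For reference, a usage sketch of the two queue macros added above. It assumes the pre-existing bfa_q_next()/bfa_q_prev()/bfa_q_qe_init() helpers in cna.h resolve to the embedded list_head's next/prev pointers; because the linkage is the first member, the dequeued list_head can be cast back to the containing element:

#include <linux/list.h>
#include <linux/printk.h>
#include "cna.h"		/* bfa_q_enq_head(), bfa_q_deq_tail() */

struct demo_qe {
	struct list_head qe;	/* linkage first, so the cast below is valid */
	int id;
};

static void demo_queue_ops(struct list_head *q, struct demo_qe *elem)
{
	struct list_head *qe;

	bfa_q_qe_init(&elem->qe);	/* NULL links satisfy enq_head's sanity check */
	bfa_q_enq_head(q, &elem->qe);	/* insert at the head of the queue */

	bfa_q_deq_tail(q, &qe);		/* remove from the tail */
	if (qe)				/* qe is NULL when the queue was empty */
		pr_info("dequeued id %d\n", ((struct demo_qe *)qe)->id);
}
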