author	Rasesh Mody <rmody@brocade.com>	2011-08-08 12:21:36 -0400
committer	David S. Miller <davem@davemloft.net>	2011-08-11 10:30:12 -0400
commit	45979c1e424f6a14495a4988343df176cb745f84 (patch)
tree	6cca0a555549c5d98534af18ca3f9c71c41c8ea6 /drivers/net
parent	af027a34f34a8c0794a72dae8367e268eae89dbb (diff)
bna: Introduce ENET as New Driver and FW Interface
Change details:
 - This patch contains the messages, opcodes and structure formats for the
   messages and responses exchanged between the driver and the FW. In
   addition, it contains the state machine implementations for the Ethport,
   Enet and IOCEth objects.
 - The Ethport object is responsible for receiving link state events and
   sending port enable/disable commands to the FW.
 - The Enet object is responsible for synchronizing initialization and
   teardown of the Tx & Rx datapath configuration.
 - The IOCEth object is responsible for init/uninit of the IO Controller
   in the adapter, which runs the FW.
 - This patch also contains code for initialization and resource assignment
   for the Ethport, Enet, IOCEth, Tx and Rx objects.

Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/brocade/bna/bfi_enet.h	901
-rw-r--r--	drivers/net/ethernet/brocade/bna/bna_enet.c	2129
2 files changed, 3030 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
new file mode 100644
index 000000000000..a90f1cf46b41
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -0,0 +1,901 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/**
20 * @file bfi_enet.h BNA Hardware and Firmware Interface
21 */
22
23/**
24 * Statistics collection is skipped here to avoid clutter.
25 * Commands that are no longer needed:
26 * MTU
27 * TxQ Stop
28 * RxQ Stop
29 * RxF Enable/Disable
30 *
31 * The HDS-off request is dynamic.
32 * Keep structures as multiples of 32-bit fields for alignment.
33 * All values must be written in big-endian.
34 */
35#ifndef __BFI_ENET_H__
36#define __BFI_ENET_H__
37
38#include "bfa_defs.h"
39#include "bfi.h"
40
41#pragma pack(1)
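/*
 * Editor's note (inference, not part of the patch): pack(1) removes all
 * compiler padding, so every structure in this header matches the
 * firmware's wire layout byte for byte. That is presumably why the
 * structures below carry explicit rsvd fields to preserve 32-bit
 * alignment by hand.
 */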
42
43#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */
44
45#define BFI_ENET_TXQ_PRIO_MAX 8
46#define BFI_ENET_RX_QSET_MAX 16
47#define BFI_ENET_TXQ_WI_VECT_MAX 4
48
49#define BFI_ENET_VLAN_ID_MAX 4096
50#define BFI_ENET_VLAN_BLOCK_SIZE 512 /* in bits */
51#define BFI_ENET_VLAN_BLOCKS_MAX \
52 (BFI_ENET_VLAN_ID_MAX / BFI_ENET_VLAN_BLOCK_SIZE)
53#define BFI_ENET_VLAN_WORD_SIZE 32 /* in bits */
54#define BFI_ENET_VLAN_WORDS_MAX \
55 (BFI_ENET_VLAN_BLOCK_SIZE / BFI_ENET_VLAN_WORD_SIZE)
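/*
 * Editor's illustration of the arithmetic above: the 4096-entry VLAN
 * table splits into 4096 / 512 = 8 blocks, each carried as
 * 512 / 32 = 16 u32 words in a single bfi_enet_rx_vlan_req. A given
 * VLAN id v therefore lands in block v / 512, word (v % 512) / 32,
 * bit v % 32 of that request's bit_mask[].
 */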
56
57#define BFI_ENET_RSS_RIT_MAX 64 /* entries */
58#define BFI_ENET_RSS_KEY_LEN 10 /* 32-bit words */
59
60union bfi_addr_be_u {
61 struct {
62 u32 addr_hi; /* Most significant 32 bits */
63 u32 addr_lo; /* Least significant 32 bits */
64 } a32;
65};
66
67/**
68 * T X Q U E U E D E F I N E S
69 */
70/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
71/* TxQ Entry Opcodes */
72#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
73#define BFI_ENET_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
74#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */
75
76/* TxQ Entry Control Flags */
77#define BFI_ENET_TXQ_WI_CF_FCOE_CRC (1 << 8)
78#define BFI_ENET_TXQ_WI_CF_IPID_MODE (1 << 5)
79#define BFI_ENET_TXQ_WI_CF_INS_PRIO (1 << 4)
80#define BFI_ENET_TXQ_WI_CF_INS_VLAN (1 << 3)
81#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM (1 << 2)
82#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM (1 << 1)
83#define BFI_ENET_TXQ_WI_CF_IP_CKSUM (1 << 0)
84
85struct bfi_enet_txq_wi_base {
86 u8 reserved;
87 u8 num_vectors; /* number of vectors present */
88 u16 opcode;
89 /* BFI_ENET_TXQ_WI_SEND or BFI_ENET_TXQ_WI_SEND_LSO */
90 u16 flags; /* OR of all the flags */
91 u16 l4_hdr_size_n_offset;
92 u16 vlan_tag;
93 u16 lso_mss; /* Only 14 LSB are valid */
94 u32 frame_length; /* Only 24 LSB are valid */
95};
96
97struct bfi_enet_txq_wi_ext {
98 u16 reserved;
99 u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */
100 u32 reserved2[3];
101};
102
103struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
104 u16 reserved;
105 u16 length; /* Only 14 LSB are valid */
106 union bfi_addr_be_u addr;
107};
108
109/**
110 * TxQ Entry Structure
111 *
112 */
113struct bfi_enet_txq_entry {
114 union {
115 struct bfi_enet_txq_wi_base base;
116 struct bfi_enet_txq_wi_ext ext;
117 } wi;
118 struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX];
119};
120
121#define wi_hdr wi.base
122#define wi_ext_hdr wi.ext
123
124#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
125 (((_hdr_size) << 10) | ((_offset) & 0x3FF))
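/*
 * Editor's worked example for the macro above (the values are assumed,
 * not from the patch): a 20-byte TCP header starting at byte offset 34
 * in the frame packs as
 *
 *	BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(20, 34)
 *		= (20 << 10) | (34 & 0x3FF) = 0x5022
 *
 * which is what a sender would store in
 * bfi_enet_txq_wi_base.l4_hdr_size_n_offset.
 */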
126
127/**
128 * R X Q U E U E D E F I N E S
129 */
130struct bfi_enet_rxq_entry {
131 union bfi_addr_be_u rx_buffer;
132};
133
134/**
135 * R X C O M P L E T I O N Q U E U E D E F I N E S
136 */
137/* CQ Entry Flags */
138#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
139#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
140#define BFI_ENET_CQ_EF_TOO_LONG (1 << 2)
141#define BFI_ENET_CQ_EF_FC_CRC_OK (1 << 3)
142
143#define BFI_ENET_CQ_EF_RSVD1 (1 << 4)
144#define BFI_ENET_CQ_EF_L4_CKSUM_OK (1 << 5)
145#define BFI_ENET_CQ_EF_L3_CKSUM_OK (1 << 6)
146#define BFI_ENET_CQ_EF_HDS_HEADER (1 << 7)
147
148#define BFI_ENET_CQ_EF_UDP (1 << 8)
149#define BFI_ENET_CQ_EF_TCP (1 << 9)
150#define BFI_ENET_CQ_EF_IP_OPTIONS (1 << 10)
151#define BFI_ENET_CQ_EF_IPV6 (1 << 11)
152
153#define BFI_ENET_CQ_EF_IPV4 (1 << 12)
154#define BFI_ENET_CQ_EF_VLAN (1 << 13)
155#define BFI_ENET_CQ_EF_RSS (1 << 14)
156#define BFI_ENET_CQ_EF_RSVD2 (1 << 15)
157
158#define BFI_ENET_CQ_EF_MCAST_MATCH (1 << 16)
159#define BFI_ENET_CQ_EF_MCAST (1 << 17)
160#define BFI_ENET_CQ_EF_BCAST (1 << 18)
161#define BFI_ENET_CQ_EF_REMOTE (1 << 19)
162
163#define BFI_ENET_CQ_EF_LOCAL (1 << 20)
164
165/* CQ Entry Structure */
166struct bfi_enet_cq_entry {
167 u32 flags;
168 u16 vlan_tag;
169 u16 length;
170 u32 rss_hash;
171 u8 valid;
172 u8 reserved1;
173 u8 reserved2;
174 u8 rxq_id;
175};
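/*
 * Editor's sketch of an assumed consumer (the Rx path itself is in a
 * separate patch): a completion handler would typically combine these
 * flags before trusting hardware checksum results, e.g.
 *
 *	ok = (flags & BFI_ENET_CQ_EF_IPV4) &&
 *	     (flags & BFI_ENET_CQ_EF_L3_CKSUM_OK) &&
 *	     (flags & BFI_ENET_CQ_EF_L4_CKSUM_OK);
 */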
176
177/**
178 * E N E T C O N T R O L P A T H C O M M A N D S
179 */
180struct bfi_enet_q {
181 union bfi_addr_u pg_tbl;
182 union bfi_addr_u first_entry;
183 u16 pages; /* # of pages */
184 u16 page_sz;
185};
186
187struct bfi_enet_txq {
188 struct bfi_enet_q q;
189 u8 priority;
190 u8 rsvd[3];
191};
192
193struct bfi_enet_rxq {
194 struct bfi_enet_q q;
195 u16 rx_buffer_size;
196 u16 rsvd;
197};
198
199struct bfi_enet_cq {
200 struct bfi_enet_q q;
201};
202
203struct bfi_enet_ib_cfg {
204 u8 int_pkt_dma;
205 u8 int_enabled;
206 u8 int_pkt_enabled;
207 u8 continuous_coalescing;
208 u8 msix;
209 u8 rsvd[3];
210 u32 coalescing_timeout;
211 u32 inter_pkt_timeout;
212 u8 inter_pkt_count;
213 u8 rsvd1[3];
214};
215
216struct bfi_enet_ib {
217 union bfi_addr_u index_addr;
218 union {
219 u16 msix_index;
220 u16 intx_bitmask;
221 } intr;
222 u16 rsvd;
223};
224
225/**
226 * ENET command messages
227 */
228enum bfi_enet_h2i_msgs {
229 /* Rx Commands */
230 BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
231 BFI_ENET_H2I_RX_CFG_CLR_REQ = 2,
232
233 BFI_ENET_H2I_RIT_CFG_REQ = 3,
234 BFI_ENET_H2I_RSS_CFG_REQ = 4,
235 BFI_ENET_H2I_RSS_ENABLE_REQ = 5,
236 BFI_ENET_H2I_RX_PROMISCUOUS_REQ = 6,
237 BFI_ENET_H2I_RX_DEFAULT_REQ = 7,
238
239 BFI_ENET_H2I_MAC_UCAST_SET_REQ = 8,
240 BFI_ENET_H2I_MAC_UCAST_CLR_REQ = 9,
241 BFI_ENET_H2I_MAC_UCAST_ADD_REQ = 10,
242 BFI_ENET_H2I_MAC_UCAST_DEL_REQ = 11,
243
244 BFI_ENET_H2I_MAC_MCAST_ADD_REQ = 12,
245 BFI_ENET_H2I_MAC_MCAST_DEL_REQ = 13,
246 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ = 14,
247
248 BFI_ENET_H2I_RX_VLAN_SET_REQ = 15,
249 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ = 16,
250
251 /* Tx Commands */
252 BFI_ENET_H2I_TX_CFG_SET_REQ = 17,
253 BFI_ENET_H2I_TX_CFG_CLR_REQ = 18,
254
255 /* Port Commands */
256 BFI_ENET_H2I_PORT_ADMIN_UP_REQ = 19,
257 BFI_ENET_H2I_SET_PAUSE_REQ = 20,
258 BFI_ENET_H2I_DIAG_LOOPBACK_REQ = 21,
259
260 /* Get Attributes Command */
261 BFI_ENET_H2I_GET_ATTR_REQ = 22,
262
263 /* Statistics Commands */
264 BFI_ENET_H2I_STATS_GET_REQ = 23,
265 BFI_ENET_H2I_STATS_CLR_REQ = 24,
266
267 BFI_ENET_H2I_WOL_MAGIC_REQ = 25,
268 BFI_ENET_H2I_WOL_FRAME_REQ = 26,
269
270 BFI_ENET_H2I_MAX = 27,
271};
272
273enum bfi_enet_i2h_msgs {
274 /* Rx Responses */
275 BFI_ENET_I2H_RX_CFG_SET_RSP =
276 BFA_I2HM(BFI_ENET_H2I_RX_CFG_SET_REQ),
277 BFI_ENET_I2H_RX_CFG_CLR_RSP =
278 BFA_I2HM(BFI_ENET_H2I_RX_CFG_CLR_REQ),
279
280 BFI_ENET_I2H_RIT_CFG_RSP =
281 BFA_I2HM(BFI_ENET_H2I_RIT_CFG_REQ),
282 BFI_ENET_I2H_RSS_CFG_RSP =
283 BFA_I2HM(BFI_ENET_H2I_RSS_CFG_REQ),
284 BFI_ENET_I2H_RSS_ENABLE_RSP =
285 BFA_I2HM(BFI_ENET_H2I_RSS_ENABLE_REQ),
286 BFI_ENET_I2H_RX_PROMISCUOUS_RSP =
287 BFA_I2HM(BFI_ENET_H2I_RX_PROMISCUOUS_REQ),
288 BFI_ENET_I2H_RX_DEFAULT_RSP =
289 BFA_I2HM(BFI_ENET_H2I_RX_DEFAULT_REQ),
290
291 BFI_ENET_I2H_MAC_UCAST_SET_RSP =
292 BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_SET_REQ),
293 BFI_ENET_I2H_MAC_UCAST_CLR_RSP =
294 BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_CLR_REQ),
295 BFI_ENET_I2H_MAC_UCAST_ADD_RSP =
296 BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_ADD_REQ),
297 BFI_ENET_I2H_MAC_UCAST_DEL_RSP =
298 BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_DEL_REQ),
299
300 BFI_ENET_I2H_MAC_MCAST_ADD_RSP =
301 BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_ADD_REQ),
302 BFI_ENET_I2H_MAC_MCAST_DEL_RSP =
303 BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_DEL_REQ),
304 BFI_ENET_I2H_MAC_MCAST_FILTER_RSP =
305 BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_FILTER_REQ),
306
307 BFI_ENET_I2H_RX_VLAN_SET_RSP =
308 BFA_I2HM(BFI_ENET_H2I_RX_VLAN_SET_REQ),
309
310 BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP =
311 BFA_I2HM(BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ),
312
313 /* Tx Responses */
314 BFI_ENET_I2H_TX_CFG_SET_RSP =
315 BFA_I2HM(BFI_ENET_H2I_TX_CFG_SET_REQ),
316 BFI_ENET_I2H_TX_CFG_CLR_RSP =
317 BFA_I2HM(BFI_ENET_H2I_TX_CFG_CLR_REQ),
318
319 /* Port Responses */
320 BFI_ENET_I2H_PORT_ADMIN_RSP =
321 BFA_I2HM(BFI_ENET_H2I_PORT_ADMIN_UP_REQ),
322
323 BFI_ENET_I2H_SET_PAUSE_RSP =
324 BFA_I2HM(BFI_ENET_H2I_SET_PAUSE_REQ),
325 BFI_ENET_I2H_DIAG_LOOPBACK_RSP =
326 BFA_I2HM(BFI_ENET_H2I_DIAG_LOOPBACK_REQ),
327
328 /* Attributes Response */
329 BFI_ENET_I2H_GET_ATTR_RSP =
330 BFA_I2HM(BFI_ENET_H2I_GET_ATTR_REQ),
331
332 /* Statistics Responses */
333 BFI_ENET_I2H_STATS_GET_RSP =
334 BFA_I2HM(BFI_ENET_H2I_STATS_GET_REQ),
335 BFI_ENET_I2H_STATS_CLR_RSP =
336 BFA_I2HM(BFI_ENET_H2I_STATS_CLR_REQ),
337
338 BFI_ENET_I2H_WOL_MAGIC_RSP =
339 BFA_I2HM(BFI_ENET_H2I_WOL_MAGIC_REQ),
340 BFI_ENET_I2H_WOL_FRAME_RSP =
341 BFA_I2HM(BFI_ENET_H2I_WOL_FRAME_REQ),
342
343 /* AENs */
344 BFI_ENET_I2H_LINK_DOWN_AEN = BFA_I2HM(BFI_ENET_H2I_MAX),
345 BFI_ENET_I2H_LINK_UP_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 1),
346
347 BFI_ENET_I2H_PORT_ENABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 2),
348 BFI_ENET_I2H_PORT_DISABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 3),
349
350 BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
351};
352
353/**
354 * The following error codes can be returned by the enet commands
355 */
356enum bfi_enet_err {
357 BFI_ENET_CMD_OK = 0,
358 BFI_ENET_CMD_FAIL = 1,
359 BFI_ENET_CMD_DUP_ENTRY = 2, /*!< Duplicate entry in CAM */
360 BFI_ENET_CMD_CAM_FULL = 3, /*!< CAM is full */
361 BFI_ENET_CMD_NOT_OWNER = 4, /*!< Not permitted because not owner */
362 BFI_ENET_CMD_NOT_EXEC = 5, /*!< Was not sent to f/w at all */
363 BFI_ENET_CMD_WAITING = 6, /*!< Waiting for completion */
364 BFI_ENET_CMD_PORT_DISABLED = 7, /*!< Port is in disabled state */
365};
366
367/**
368 * Generic Request
369 *
370 * bfi_enet_req is used by:
371 * BFI_ENET_H2I_RX_CFG_CLR_REQ
372 * BFI_ENET_H2I_TX_CFG_CLR_REQ
373 */
374struct bfi_enet_req {
375 struct bfi_msgq_mhdr mh;
376};
377
378/**
379 * Enable/Disable Request
380 *
381 * bfi_enet_enable_req is used by:
382 * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero)
383 * BFI_ENET_H2I_RX_PROMISCUOUS_REQ (enet_id must be zero)
384 * BFI_ENET_H2I_RX_DEFAULT_REQ (enet_id must be zero)
385 * BFI_ENET_H2I_MAC_MCAST_FILTER_REQ
386 * BFI_ENET_H2I_PORT_ADMIN_UP_REQ (enet_id must be zero)
387 */
388struct bfi_enet_enable_req {
389 struct bfi_msgq_mhdr mh;
390 u8 enable; /* 1 = enable; 0 = disable */
391 u8 rsvd[3];
392};
393
394/**
395 * Generic Response
396 */
397struct bfi_enet_rsp {
398 struct bfi_msgq_mhdr mh;
399 u8 error; /*!< if error see cmd_offset */
400 u8 rsvd;
401 u16 cmd_offset; /*!< offset to invalid parameter */
402};
403
404/**
405 * GLOBAL CONFIGURATION
406 */
407
408/**
409 * bfi_enet_attr_req is used by:
410 * BFI_ENET_H2I_GET_ATTR_REQ
411 */
412struct bfi_enet_attr_req {
413 struct bfi_msgq_mhdr mh;
414};
415
416/**
417 * bfi_enet_attr_rsp is used by:
418 * BFI_ENET_I2H_GET_ATTR_RSP
419 */
420struct bfi_enet_attr_rsp {
421 struct bfi_msgq_mhdr mh;
422 u8 error; /*!< if error see cmd_offset */
423 u8 rsvd;
424 u16 cmd_offset; /*!< offset to invalid parameter */
425 u32 max_cfg;
426 u32 max_ucmac;
427 u32 rit_size;
428};
429
430/**
431 * Tx Configuration
432 *
433 * bfi_enet_tx_cfg is used by:
434 * BFI_ENET_H2I_TX_CFG_SET_REQ
435 */
436enum bfi_enet_tx_vlan_mode {
437 BFI_ENET_TX_VLAN_NOP = 0,
438 BFI_ENET_TX_VLAN_INS = 1,
439 BFI_ENET_TX_VLAN_WI = 2,
440};
441
442struct bfi_enet_tx_cfg {
443 u8 vlan_mode; /*!< processing mode */
444 u8 rsvd;
445 u16 vlan_id;
446 u8 admit_tagged_frame;
447 u8 apply_vlan_filter;
448 u8 add_to_vswitch;
449 u8 rsvd1[1];
450};
451
452struct bfi_enet_tx_cfg_req {
453 struct bfi_msgq_mhdr mh;
454 u8 num_queues; /* # of Tx Queues */
455 u8 rsvd[3];
456
457 struct {
458 struct bfi_enet_txq q;
459 struct bfi_enet_ib ib;
460 } q_cfg[BFI_ENET_TXQ_PRIO_MAX];
461
462 struct bfi_enet_ib_cfg ib_cfg;
463
464 struct bfi_enet_tx_cfg tx_cfg;
465};
466
467struct bfi_enet_tx_cfg_rsp {
468 struct bfi_msgq_mhdr mh;
469 u8 error;
470 u8 hw_id; /* For debugging */
471 u8 rsvd[2];
472 struct {
473 u32 q_dbell; /* PCI base address offset */
474 u32 i_dbell; /* PCI base address offset */
475 u8 hw_qid; /* For debugging */
476 u8 rsvd[3];
477 } q_handles[BFI_ENET_TXQ_PRIO_MAX];
478};
479
480/**
481 * Rx Configuration
482 *
483 * bfi_enet_rx_cfg is used by:
484 * BFI_ENET_H2I_RX_CFG_SET_REQ
485 */
486enum bfi_enet_rxq_type {
487 BFI_ENET_RXQ_SINGLE = 1,
488 BFI_ENET_RXQ_LARGE_SMALL = 2,
489 BFI_ENET_RXQ_HDS = 3,
490 BFI_ENET_RXQ_HDS_OPT_BASED = 4,
491};
492
493enum bfi_enet_hds_type {
494 BFI_ENET_HDS_FORCED = 0x01,
495 BFI_ENET_HDS_IPV6_UDP = 0x02,
496 BFI_ENET_HDS_IPV6_TCP = 0x04,
497 BFI_ENET_HDS_IPV4_TCP = 0x08,
498 BFI_ENET_HDS_IPV4_UDP = 0x10,
499};
500
501struct bfi_enet_rx_cfg {
502 u8 rxq_type;
503 u8 rsvd[3];
504
505 struct {
506 u8 max_header_size;
507 u8 force_offset;
508 u8 type;
509 u8 rsvd1;
510 } hds;
511
512 u8 multi_buffer;
513 u8 strip_vlan;
514 u8 drop_untagged;
515 u8 rsvd2;
516};
517
518/*
519 * Multicast frames are received on the ql of q-set index zero. On the
520 * completion queue, an even RxQ ID denotes a large/data buffer queue
521 * and an odd RxQ ID denotes a small/header buffer queue.
522 */
523struct bfi_enet_rx_cfg_req {
524 struct bfi_msgq_mhdr mh;
525 u8 num_queue_sets; /* # of Rx Queue Sets */
526 u8 rsvd[3];
527
528 struct {
529 struct bfi_enet_rxq ql; /* large/data/single buffers */
530 struct bfi_enet_rxq qs; /* small/header buffers */
531 struct bfi_enet_cq cq;
532 struct bfi_enet_ib ib;
533 } q_cfg[BFI_ENET_RX_QSET_MAX];
534
535 struct bfi_enet_ib_cfg ib_cfg;
536
537 struct bfi_enet_rx_cfg rx_cfg;
538};
539
540struct bfi_enet_rx_cfg_rsp {
541 struct bfi_msgq_mhdr mh;
542 u8 error;
543 u8 hw_id; /* For debugging */
544 u8 rsvd[2];
545 struct {
546 u32 ql_dbell; /* PCI base address offset */
547 u32 qs_dbell; /* PCI base address offset */
548 u32 i_dbell; /* PCI base address offset */
549 u8 hw_lqid; /* For debugging */
550 u8 hw_sqid; /* For debugging */
551 u8 hw_cqid; /* For debugging */
552 u8 rsvd;
553 } q_handles[BFI_ENET_RX_QSET_MAX];
554};
555
556/**
557 * RIT
558 *
559 * bfi_enet_rit_req is used by:
560 * BFI_ENET_H2I_RIT_CFG_REQ
561 */
562struct bfi_enet_rit_req {
563 struct bfi_msgq_mhdr mh;
564 u16 size; /* number of table-entries used */
565 u8 rsvd[2];
566 u8 table[BFI_ENET_RSS_RIT_MAX];
567};
568
569/**
570 * RSS
571 *
572 * bfi_enet_rss_cfg_req is used by:
573 * BFI_ENET_H2I_RSS_CFG_REQ
574 */
575enum bfi_enet_rss_type {
576 BFI_ENET_RSS_IPV6 = 0x01,
577 BFI_ENET_RSS_IPV6_TCP = 0x02,
578 BFI_ENET_RSS_IPV4 = 0x04,
579 BFI_ENET_RSS_IPV4_TCP = 0x08
580};
581
582struct bfi_enet_rss_cfg {
583 u8 type;
584 u8 mask;
585 u8 rsvd[2];
586 u32 key[BFI_ENET_RSS_KEY_LEN];
587};
588
589struct bfi_enet_rss_cfg_req {
590 struct bfi_msgq_mhdr mh;
591 struct bfi_enet_rss_cfg cfg;
592};
593
594/**
595 * MAC Unicast
596 *
597 * bfi_enet_ucast_req is used by:
598 * BFI_ENET_H2I_MAC_UCAST_SET_REQ
599 * BFI_ENET_H2I_MAC_UCAST_CLR_REQ
600 * BFI_ENET_H2I_MAC_UCAST_ADD_REQ
601 * BFI_ENET_H2I_MAC_UCAST_DEL_REQ
602 */
603struct bfi_enet_ucast_req {
604 struct bfi_msgq_mhdr mh;
605 mac_t mac_addr;
606 u8 rsvd[2];
607};
608
609/**
610 * MAC Unicast + VLAN
611 */
612struct bfi_enet_mac_n_vlan_req {
613 struct bfi_msgq_mhdr mh;
614 u16 vlan_id;
615 mac_t mac_addr;
616};
617
618/**
619 * MAC Multicast
620 *
621 * bfi_enet_mcast_add_req is used by:
622 * BFI_ENET_H2I_MAC_MCAST_ADD_REQ
623 */
624struct bfi_enet_mcast_add_req {
625 struct bfi_msgq_mhdr mh;
626 mac_t mac_addr;
627 u8 rsvd[2];
628};
629
630/**
631 * bfi_enet_mcast_add_rsp is used by:
632 * BFI_ENET_I2H_MAC_MCAST_ADD_RSP
633 */
634struct bfi_enet_mcast_add_rsp {
635 struct bfi_msgq_mhdr mh;
636 u8 error;
637 u8 rsvd;
638 u16 cmd_offset;
639 u16 handle;
640 u8 rsvd1[2];
641};
642
643/**
644 * bfi_enet_mcast_del_req is used by:
645 * BFI_ENET_H2I_MAC_MCAST_DEL_REQ
646 */
647struct bfi_enet_mcast_del_req {
648 struct bfi_msgq_mhdr mh;
649 u16 handle;
650 u8 rsvd[2];
651};
652
653/**
654 * VLAN
655 *
656 * bfi_enet_rx_vlan_req is used by:
657 * BFI_ENET_H2I_RX_VLAN_SET_REQ
658 */
659struct bfi_enet_rx_vlan_req {
660 struct bfi_msgq_mhdr mh;
661 u8 block_idx;
662 u8 rsvd[3];
663 u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
664};
665
666/**
667 * PAUSE
668 *
669 * bfi_enet_set_pause_req is used by:
670 * BFI_ENET_H2I_SET_PAUSE_REQ
671 */
672struct bfi_enet_set_pause_req {
673 struct bfi_msgq_mhdr mh;
674 u8 rsvd[2];
675 u8 tx_pause; /* 1 = enable; 0 = disable */
676 u8 rx_pause; /* 1 = enable; 0 = disable */
677};
678
679/**
680 * DIAGNOSTICS
681 *
682 * bfi_enet_diag_lb_req is used by:
683 * BFI_ENET_H2I_DIAG_LOOPBACK_REQ
684 */
685struct bfi_enet_diag_lb_req {
686 struct bfi_msgq_mhdr mh;
687 u8 rsvd[2];
688 u8 mode; /* cable or Serdes */
689 u8 enable; /* 1 = enable; 0 = disable */
690};
691
692/**
693 * enum for Loopback opmodes
694 */
695enum {
696 BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
697 BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
698};
699
700/**
701 * STATISTICS
702 *
703 * bfi_enet_stats_req is used by:
704 * BFI_ENET_H2I_STATS_GET_REQ
705 * BFI_ENET_H2I_STATS_CLR_REQ
706 */
707struct bfi_enet_stats_req {
708 struct bfi_msgq_mhdr mh;
709 u16 stats_mask;
710 u8 rsvd[2];
711 u32 rx_enet_mask;
712 u32 tx_enet_mask;
713 union bfi_addr_u host_buffer;
714};
715
716/**
717 * defines for "stats_mask" above.
718 */
719#define BFI_ENET_STATS_MAC (1 << 0) /*!< MAC Statistics */
720#define BFI_ENET_STATS_BPC (1 << 1) /*!< Pause Stats from BPC */
721#define BFI_ENET_STATS_RAD (1 << 2) /*!< Rx Admission Statistics */
722#define BFI_ENET_STATS_RX_FC (1 << 3) /*!< Rx FC Stats from RxA */
723#define BFI_ENET_STATS_TX_FC (1 << 4) /*!< Tx FC Stats from TxA */
724
725#define BFI_ENET_STATS_ALL 0x1f
726
727/* TxF Frame Statistics */
728struct bfi_enet_stats_txf {
729 u64 ucast_octets;
730 u64 ucast;
731 u64 ucast_vlan;
732
733 u64 mcast_octets;
734 u64 mcast;
735 u64 mcast_vlan;
736
737 u64 bcast_octets;
738 u64 bcast;
739 u64 bcast_vlan;
740
741 u64 errors;
742 u64 filter_vlan; /* frames filtered due to VLAN */
743 u64 filter_mac_sa; /* frames filtered due to SA check */
744};
745
746/* RxF Frame Statistics */
747struct bfi_enet_stats_rxf {
748 u64 ucast_octets;
749 u64 ucast;
750 u64 ucast_vlan;
751
752 u64 mcast_octets;
753 u64 mcast;
754 u64 mcast_vlan;
755
756 u64 bcast_octets;
757 u64 bcast;
758 u64 bcast_vlan;
759 u64 frame_drops;
760};
761
762/* FC Tx Frame Statistics */
763struct bfi_enet_stats_fc_tx {
764 u64 txf_ucast_octets;
765 u64 txf_ucast;
766 u64 txf_ucast_vlan;
767
768 u64 txf_mcast_octets;
769 u64 txf_mcast;
770 u64 txf_mcast_vlan;
771
772 u64 txf_bcast_octets;
773 u64 txf_bcast;
774 u64 txf_bcast_vlan;
775
776 u64 txf_parity_errors;
777 u64 txf_timeout;
778 u64 txf_fid_parity_errors;
779};
780
781/* FC Rx Frame Statistics */
782struct bfi_enet_stats_fc_rx {
783 u64 rxf_ucast_octets;
784 u64 rxf_ucast;
785 u64 rxf_ucast_vlan;
786
787 u64 rxf_mcast_octets;
788 u64 rxf_mcast;
789 u64 rxf_mcast_vlan;
790
791 u64 rxf_bcast_octets;
792 u64 rxf_bcast;
793 u64 rxf_bcast_vlan;
794};
795
796/* RAD Frame Statistics */
797struct bfi_enet_stats_rad {
798 u64 rx_frames;
799 u64 rx_octets;
800 u64 rx_vlan_frames;
801
802 u64 rx_ucast;
803 u64 rx_ucast_octets;
804 u64 rx_ucast_vlan;
805
806 u64 rx_mcast;
807 u64 rx_mcast_octets;
808 u64 rx_mcast_vlan;
809
810 u64 rx_bcast;
811 u64 rx_bcast_octets;
812 u64 rx_bcast_vlan;
813
814 u64 rx_drops;
815};
816
817/* BPC Tx Registers */
818struct bfi_enet_stats_bpc {
819 /* transmit stats */
820 u64 tx_pause[8];
821 u64 tx_zero_pause[8]; /*!< Pause cancellation */
822 /*!< Pause initiation rather than retention */
823 u64 tx_first_pause[8];
824
825 /* receive stats */
826 u64 rx_pause[8];
827 u64 rx_zero_pause[8]; /*!< Pause cancellation */
828 /*!< Pause initiation rather than retention */
829 u64 rx_first_pause[8];
830};
831
832/* MAC Rx Statistics */
833struct bfi_enet_stats_mac {
834 u64 frame_64; /* both rx and tx counter */
835 u64 frame_65_127; /* both rx and tx counter */
836 u64 frame_128_255; /* both rx and tx counter */
837 u64 frame_256_511; /* both rx and tx counter */
838 u64 frame_512_1023; /* both rx and tx counter */
839 u64 frame_1024_1518; /* both rx and tx counter */
840 u64 frame_1519_1522; /* both rx and tx counter */
841
842 /* receive stats */
843 u64 rx_bytes;
844 u64 rx_packets;
845 u64 rx_fcs_error;
846 u64 rx_multicast;
847 u64 rx_broadcast;
848 u64 rx_control_frames;
849 u64 rx_pause;
850 u64 rx_unknown_opcode;
851 u64 rx_alignment_error;
852 u64 rx_frame_length_error;
853 u64 rx_code_error;
854 u64 rx_carrier_sense_error;
855 u64 rx_undersize;
856 u64 rx_oversize;
857 u64 rx_fragments;
858 u64 rx_jabber;
859 u64 rx_drop;
860
861 /* transmit stats */
862 u64 tx_bytes;
863 u64 tx_packets;
864 u64 tx_multicast;
865 u64 tx_broadcast;
866 u64 tx_pause;
867 u64 tx_deferral;
868 u64 tx_excessive_deferral;
869 u64 tx_single_collision;
870 u64 tx_multiple_collision;
871 u64 tx_late_collision;
872 u64 tx_excessive_collision;
873 u64 tx_total_collision;
874 u64 tx_pause_honored;
875 u64 tx_drop;
876 u64 tx_jabber;
877 u64 tx_fcs_error;
878 u64 tx_control_frame;
879 u64 tx_oversize;
880 u64 tx_undersize;
881 u64 tx_fragments;
882};
883
884/**
885 * Complete statistics, DMAed from fw to host followed by
886 * BFI_ENET_I2H_STATS_GET_RSP
887 */
888struct bfi_enet_stats {
889 struct bfi_enet_stats_mac mac_stats;
890 struct bfi_enet_stats_bpc bpc_stats;
891 struct bfi_enet_stats_rad rad_stats;
892 struct bfi_enet_stats_rad rlb_stats;
893 struct bfi_enet_stats_fc_rx fc_rx_stats;
894 struct bfi_enet_stats_fc_tx fc_tx_stats;
895 struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX];
896 struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX];
897};
898
899#pragma pack()
900
901#endif /* __BFI_ENET_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
new file mode 100644
index 000000000000..68a275d66fcf
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -0,0 +1,2129 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include "bna.h"
19
20static inline int
21ethport_can_be_up(struct bna_ethport *ethport)
22{
23 int ready = 0;
24 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
25 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
26 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
27 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
28 else
29 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
30 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
31 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
32 return ready;
33}
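/*
 * Editor's note: for a regular enet the port must be administratively
 * up, have Rx started and be enabled by the port management entity.
 * For loopback enets the PORT_ENABLED test is inverted -- diagnostic
 * loopback is only brought up while the physical port is disabled
 * (an inference from the logic above, not stated in the patch).
 */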
34
35#define ethport_is_up ethport_can_be_up
36
37enum bna_ethport_event {
38 ETHPORT_E_START = 1,
39 ETHPORT_E_STOP = 2,
40 ETHPORT_E_FAIL = 3,
41 ETHPORT_E_UP = 4,
42 ETHPORT_E_DOWN = 5,
43 ETHPORT_E_FWRESP_UP_OK = 6,
44 ETHPORT_E_FWRESP_DOWN = 7,
45 ETHPORT_E_FWRESP_UP_FAIL = 8,
46};
47
48enum bna_enet_event {
49 ENET_E_START = 1,
50 ENET_E_STOP = 2,
51 ENET_E_FAIL = 3,
52 ENET_E_PAUSE_CFG = 4,
53 ENET_E_MTU_CFG = 5,
54 ENET_E_FWRESP_PAUSE = 6,
55 ENET_E_CHLD_STOPPED = 7,
56};
57
58enum bna_ioceth_event {
59 IOCETH_E_ENABLE = 1,
60 IOCETH_E_DISABLE = 2,
61 IOCETH_E_IOC_RESET = 3,
62 IOCETH_E_IOC_FAILED = 4,
63 IOCETH_E_IOC_READY = 5,
64 IOCETH_E_ENET_ATTR_RESP = 6,
65 IOCETH_E_ENET_STOPPED = 7,
66 IOCETH_E_IOC_DISABLED = 8,
67};
68
69#define bna_stats_copy(_name, _type) \
70do { \
71 count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \
72 stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \
73 stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \
74 for (i = 0; i < count; i++) \
75 stats_dst[i] = be64_to_cpu(stats_src[i]); \
76} while (0)
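/*
 * Editor's illustration: bna_stats_copy(mac, mac) expands (roughly) to
 *
 *	count = sizeof(struct bfi_enet_stats_mac) / sizeof(u64);
 *	stats_src = (u64 *)&bna->stats.hw_stats_kva->mac_stats;
 *	stats_dst = (u64 *)&bna->stats.hw_stats.mac_stats;
 *	for (i = 0; i < count; i++)
 *		stats_dst[i] = be64_to_cpu(stats_src[i]);
 *
 * i.e. a word-by-word big-endian to host-endian copy of one DMAed
 * statistics block.
 */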
77
78/*
79 * FW response handlers
80 */
81
82static void
83bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
84 struct bfi_msgq_mhdr *msghdr)
85{
86 ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
87
88 if (ethport_can_be_up(ethport))
89 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
90}
91
92static void
93bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
94 struct bfi_msgq_mhdr *msghdr)
95{
96 int ethport_up = ethport_is_up(ethport);
97
98 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
99
100 if (ethport_up)
101 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
102}
103
104static void
105bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
106 struct bfi_msgq_mhdr *msghdr)
107{
108 struct bfi_enet_enable_req *admin_req =
109 &ethport->bfi_enet_cmd.admin_req;
110 struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
111
112 switch (admin_req->enable) {
113 case BNA_STATUS_T_ENABLED:
114 if (rsp->error == BFI_ENET_CMD_OK) {
115 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
116 } else {
117 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
118 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
119 }
120 break;
121
122 case BNA_STATUS_T_DISABLED:
123 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
124 ethport->link_status = BNA_LINK_DOWN;
125 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
126 break;
127 }
128}
129
130static void
131bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
132 struct bfi_msgq_mhdr *msghdr)
133{
134 struct bfi_enet_diag_lb_req *diag_lb_req =
135 &ethport->bfi_enet_cmd.lpbk_req;
136 struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
137
138 switch (diag_lb_req->enable) {
139 case BNA_STATUS_T_ENABLED:
140 if (rsp->error == BFI_ENET_CMD_OK) {
141 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
142 } else {
143 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
144 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
145 }
146 break;
147
148 case BNA_STATUS_T_DISABLED:
149 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
150 break;
151 }
152}
153
154static void
155bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
156{
157 bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
158}
159
160static void
161bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
162 struct bfi_msgq_mhdr *msghdr)
163{
164 struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;
165
166 /**
167 * Store only if not set earlier, since BNAD can override the HW
168 * attributes
169 */
170 if (!ioceth->attr.num_txq)
171 ioceth->attr.num_txq = ntohl(rsp->max_cfg);
172 if (!ioceth->attr.num_rxp)
173 ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
174 ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
175 ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
176 ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
177
178 bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
179}
180
181static void
182bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
183{
184 struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
185 u64 *stats_src;
186 u64 *stats_dst;
187 u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
188 u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
189 int count;
190 int i;
191
192 bna_stats_copy(mac, mac);
193 bna_stats_copy(bpc, bpc);
194 bna_stats_copy(rad, rad);
195 bna_stats_copy(rlb, rad);
196 bna_stats_copy(fc_rx, fc_rx);
197 bna_stats_copy(fc_tx, fc_tx);
198
199 stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);
200
201 /* Copy Rxf stats to SW area, scatter them while copying */
202 for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
203 stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
204 memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
205 if (rx_enet_mask & ((u32)(1 << i))) {
206 int k;
207 count = sizeof(struct bfi_enet_stats_rxf) /
208 sizeof(u64);
209 for (k = 0; k < count; k++) {
210 stats_dst[k] = be64_to_cpu(*stats_src);
211 stats_src++;
212 }
213 }
214 }
215
216 /* Copy Txf stats to SW area, scatter them while copying */
217 for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
218 stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
219 memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
220 if (tx_enet_mask & ((u32)(1 << i))) {
221 int k;
222 count = sizeof(struct bfi_enet_stats_txf) /
223 sizeof(u64);
224 for (k = 0; k < count; k++) {
225 stats_dst[k] = be64_to_cpu(*stats_src);
226 stats_src++;
227 }
228 }
229 }
230
231 bna->stats_mod.stats_get_busy = false;
232 bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
233}
234
235static void
236bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
237 struct bfi_msgq_mhdr *msghdr)
238{
239 ethport->link_status = BNA_LINK_UP;
240
241 /* Dispatch events */
242 ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
243}
244
245static void
246bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
247 struct bfi_msgq_mhdr *msghdr)
248{
249 ethport->link_status = BNA_LINK_DOWN;
250
251 /* Dispatch events */
252 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
253}
254
255static void
256bna_err_handler(struct bna *bna, u32 intr_status)
257{
258 if (BNA_IS_HALT_INTR(bna, intr_status))
259 bna_halt_clear(bna);
260
261 bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
262}
263
264void
265bna_mbox_handler(struct bna *bna, u32 intr_status)
266{
267 if (BNA_IS_ERR_INTR(bna, intr_status)) {
268 bna_err_handler(bna, intr_status);
269 return;
270 }
271 if (BNA_IS_MBOX_INTR(bna, intr_status))
272 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
273}
274
275static void
276bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
277{
278 struct bna *bna = (struct bna *)arg;
279 struct bna_tx *tx;
280 struct bna_rx *rx;
281
282 switch (msghdr->msg_id) {
283 case BFI_ENET_I2H_RX_CFG_SET_RSP:
284 bna_rx_from_rid(bna, msghdr->enet_id, rx);
285 if (rx)
286 bna_bfi_rx_enet_start_rsp(rx, msghdr);
287 break;
288
289 case BFI_ENET_I2H_RX_CFG_CLR_RSP:
290 bna_rx_from_rid(bna, msghdr->enet_id, rx);
291 if (rx)
292 bna_bfi_rx_enet_stop_rsp(rx, msghdr);
293 break;
294
295 case BFI_ENET_I2H_RIT_CFG_RSP:
296 case BFI_ENET_I2H_RSS_CFG_RSP:
297 case BFI_ENET_I2H_RSS_ENABLE_RSP:
298 case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
299 case BFI_ENET_I2H_RX_DEFAULT_RSP:
300 case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
301 case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
302 case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
303 case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
304 case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
305 case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
306 case BFI_ENET_I2H_RX_VLAN_SET_RSP:
307 case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
308 bna_rx_from_rid(bna, msghdr->enet_id, rx);
309 if (rx)
310 bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
311 break;
312
313 case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
314 bna_rx_from_rid(bna, msghdr->enet_id, rx);
315 if (rx)
316 bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
317 break;
318
319 case BFI_ENET_I2H_TX_CFG_SET_RSP:
320 bna_tx_from_rid(bna, msghdr->enet_id, tx);
321 if (tx)
322 bna_bfi_tx_enet_start_rsp(tx, msghdr);
323 break;
324
325 case BFI_ENET_I2H_TX_CFG_CLR_RSP:
326 bna_tx_from_rid(bna, msghdr->enet_id, tx);
327 if (tx)
328 bna_bfi_tx_enet_stop_rsp(tx, msghdr);
329 break;
330
331 case BFI_ENET_I2H_PORT_ADMIN_RSP:
332 bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
333 break;
334
335 case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
336 bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
337 break;
338
339 case BFI_ENET_I2H_SET_PAUSE_RSP:
340 bna_bfi_pause_set_rsp(&bna->enet, msghdr);
341 break;
342
343 case BFI_ENET_I2H_GET_ATTR_RSP:
344 bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
345 break;
346
347 case BFI_ENET_I2H_STATS_GET_RSP:
348 bna_bfi_stats_get_rsp(bna, msghdr);
349 break;
350
351 case BFI_ENET_I2H_STATS_CLR_RSP:
352 /* No-op */
353 break;
354
355 case BFI_ENET_I2H_LINK_UP_AEN:
356 bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
357 break;
358
359 case BFI_ENET_I2H_LINK_DOWN_AEN:
360 bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
361 break;
362
363 case BFI_ENET_I2H_PORT_ENABLE_AEN:
364 bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
365 break;
366
367 case BFI_ENET_I2H_PORT_DISABLE_AEN:
368 bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
369 break;
370
371 case BFI_ENET_I2H_BW_UPDATE_AEN:
372 bna_bfi_bw_update_aen(&bna->tx_mod);
373 break;
374
375 default:
376 break;
377 }
378}
379
380/**
381 * ETHPORT
382 */
383#define call_ethport_stop_cbfn(_ethport) \
384do { \
385 if ((_ethport)->stop_cbfn) { \
386 void (*cbfn)(struct bna_enet *); \
387 cbfn = (_ethport)->stop_cbfn; \
388 (_ethport)->stop_cbfn = NULL; \
389 cbfn(&(_ethport)->bna->enet); \
390 } \
391} while (0)
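/*
 * Editor's note: the macro above copies the callback pointer to a
 * local and clears it before invoking it, so a callback that restarts
 * the state machine (and re-arms stop_cbfn) cannot make the same
 * completion fire twice.
 */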
392
393#define call_ethport_adminup_cbfn(ethport, status) \
394do { \
395 if ((ethport)->adminup_cbfn) { \
396 void (*cbfn)(struct bnad *, enum bna_cb_status); \
397 cbfn = (ethport)->adminup_cbfn; \
398 (ethport)->adminup_cbfn = NULL; \
399 cbfn((ethport)->bna->bnad, status); \
400 } \
401} while (0)
402
403static void
404bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
405{
406 struct bfi_enet_enable_req *admin_up_req =
407 &ethport->bfi_enet_cmd.admin_req;
408
409 bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
410 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
411 admin_up_req->mh.num_entries = htons(
412 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
413 admin_up_req->enable = BNA_STATUS_T_ENABLED;
414
415 bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
416 sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
417 bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
418}
419
420static void
421bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
422{
423 struct bfi_enet_enable_req *admin_down_req =
424 &ethport->bfi_enet_cmd.admin_req;
425
426 bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
427 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
428 admin_down_req->mh.num_entries = htons(
429 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
430 admin_down_req->enable = BNA_STATUS_T_DISABLED;
431
432 bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
433 sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
434 bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
435}
436
437static void
438bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
439{
440 struct bfi_enet_diag_lb_req *lpbk_up_req =
441 &ethport->bfi_enet_cmd.lpbk_req;
442
443 bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
444 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
445 lpbk_up_req->mh.num_entries = htons(
446 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
447 lpbk_up_req->mode = (ethport->bna->enet.type ==
448 BNA_ENET_T_LOOPBACK_INTERNAL) ?
449 BFI_ENET_DIAG_LB_OPMODE_EXT :
450 BFI_ENET_DIAG_LB_OPMODE_CBL;
451 lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
452
453 bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
454 sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
455 bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
456}
457
458static void
459bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
460{
461 struct bfi_enet_diag_lb_req *lpbk_down_req =
462 &ethport->bfi_enet_cmd.lpbk_req;
463
464 bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
465 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
466 lpbk_down_req->mh.num_entries = htons(
467 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
468 lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
469
470 bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
471 sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
472 bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
473}
474
475static void
476bna_bfi_ethport_up(struct bna_ethport *ethport)
477{
478 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
479 bna_bfi_ethport_admin_up(ethport);
480 else
481 bna_bfi_ethport_lpbk_up(ethport);
482}
483
484static void
485bna_bfi_ethport_down(struct bna_ethport *ethport)
486{
487 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
488 bna_bfi_ethport_admin_down(ethport);
489 else
490 bna_bfi_ethport_lpbk_down(ethport);
491}
492
493bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
494 enum bna_ethport_event);
495bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
496 enum bna_ethport_event);
497bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
498 enum bna_ethport_event);
499bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
500 enum bna_ethport_event);
501bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
502 enum bna_ethport_event);
503bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
504 enum bna_ethport_event);
505
506static void
507bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
508{
509 call_ethport_stop_cbfn(ethport);
510}
511
512static void
513bna_ethport_sm_stopped(struct bna_ethport *ethport,
514 enum bna_ethport_event event)
515{
516 switch (event) {
517 case ETHPORT_E_START:
518 bfa_fsm_set_state(ethport, bna_ethport_sm_down);
519 break;
520
521 case ETHPORT_E_STOP:
522 call_ethport_stop_cbfn(ethport);
523 break;
524
525 case ETHPORT_E_FAIL:
526 /* No-op */
527 break;
528
529 case ETHPORT_E_DOWN:
530 /* This event is received due to Rx objects failing */
531 /* No-op */
532 break;
533
534 default:
535 bfa_sm_fault(event);
536 }
537}
538
539static void
540bna_ethport_sm_down_entry(struct bna_ethport *ethport)
541{
542}
543
544static void
545bna_ethport_sm_down(struct bna_ethport *ethport,
546 enum bna_ethport_event event)
547{
548 switch (event) {
549 case ETHPORT_E_STOP:
550 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
551 break;
552
553 case ETHPORT_E_FAIL:
554 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
555 break;
556
557 case ETHPORT_E_UP:
558 bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
559 bna_bfi_ethport_up(ethport);
560 break;
561
562 default:
563 bfa_sm_fault(event);
564 }
565}
566
567static void
568bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
569{
570}
571
572static void
573bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
574 enum bna_ethport_event event)
575{
576 switch (event) {
577 case ETHPORT_E_STOP:
578 bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
579 break;
580
581 case ETHPORT_E_FAIL:
582 call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
583 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
584 break;
585
586 case ETHPORT_E_DOWN:
587 call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
588 bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
589 break;
590
591 case ETHPORT_E_FWRESP_UP_OK:
592 call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
593 bfa_fsm_set_state(ethport, bna_ethport_sm_up);
594 break;
595
596 case ETHPORT_E_FWRESP_UP_FAIL:
597 call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
598 bfa_fsm_set_state(ethport, bna_ethport_sm_down);
599 break;
600
601 case ETHPORT_E_FWRESP_DOWN:
602 /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
603 bna_bfi_ethport_up(ethport);
604 break;
605
606 default:
607 bfa_sm_fault(event);
608 }
609}
610
611static void
612bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
613{
614 /**
615 * NOTE: Do not call bna_bfi_ethport_down() here. That would overrun the
616 * mbox, since a command is still outstanding after the up_resp_wait ->
617 * down_resp_wait transition on event ETHPORT_E_DOWN.
618 */
619}
620
621static void
622bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
623 enum bna_ethport_event event)
624{
625 switch (event) {
626 case ETHPORT_E_STOP:
627 bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
628 break;
629
630 case ETHPORT_E_FAIL:
631 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
632 break;
633
634 case ETHPORT_E_UP:
635 bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
636 break;
637
638 case ETHPORT_E_FWRESP_UP_OK:
639 /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
640 bna_bfi_ethport_down(ethport);
641 break;
642
643 case ETHPORT_E_FWRESP_UP_FAIL:
644 case ETHPORT_E_FWRESP_DOWN:
645 bfa_fsm_set_state(ethport, bna_ethport_sm_down);
646 break;
647
648 default:
649 bfa_sm_fault(event);
650 }
651}
652
653static void
654bna_ethport_sm_up_entry(struct bna_ethport *ethport)
655{
656}
657
658static void
659bna_ethport_sm_up(struct bna_ethport *ethport,
660 enum bna_ethport_event event)
661{
662 switch (event) {
663 case ETHPORT_E_STOP:
664 bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
665 bna_bfi_ethport_down(ethport);
666 break;
667
668 case ETHPORT_E_FAIL:
669 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
670 break;
671
672 case ETHPORT_E_DOWN:
673 bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
674 bna_bfi_ethport_down(ethport);
675 break;
676
677 default:
678 bfa_sm_fault(event);
679 }
680}
681
682static void
683bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
684{
685}
686
687static void
688bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
689 enum bna_ethport_event event)
690{
691 switch (event) {
692 case ETHPORT_E_FAIL:
693 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
694 break;
695
696 case ETHPORT_E_DOWN:
697 /**
698 * This event is received due to Rx objects stopping in
699 * parallel to ethport
700 */
701 /* No-op */
702 break;
703
704 case ETHPORT_E_FWRESP_UP_OK:
705 /* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
706 bna_bfi_ethport_down(ethport);
707 break;
708
709 case ETHPORT_E_FWRESP_UP_FAIL:
710 case ETHPORT_E_FWRESP_DOWN:
711 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
712 break;
713
714 default:
715 bfa_sm_fault(event);
716 }
717}
718
719static void
720bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
721{
722 ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
723 ethport->bna = bna;
724
725 ethport->link_status = BNA_LINK_DOWN;
726 ethport->link_cbfn = bnad_cb_ethport_link_status;
727
728 ethport->rx_started_count = 0;
729
730 ethport->stop_cbfn = NULL;
731 ethport->adminup_cbfn = NULL;
732
733 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
734}
735
736static void
737bna_ethport_uninit(struct bna_ethport *ethport)
738{
739 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
740 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
741
742 ethport->bna = NULL;
743}
744
745static void
746bna_ethport_start(struct bna_ethport *ethport)
747{
748 bfa_fsm_send_event(ethport, ETHPORT_E_START);
749}
750
751static void
752bna_enet_cb_ethport_stopped(struct bna_enet *enet)
753{
754 bfa_wc_down(&enet->chld_stop_wc);
755}
756
757static void
758bna_ethport_stop(struct bna_ethport *ethport)
759{
760 ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
761 bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
762}
763
764static void
765bna_ethport_fail(struct bna_ethport *ethport)
766{
767 /* Reset the physical port status to enabled */
768 ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
769
770 if (ethport->link_status != BNA_LINK_DOWN) {
771 ethport->link_status = BNA_LINK_DOWN;
772 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
773 }
774 bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
775}
776
777/* Should be called only when ethport is disabled */
778void
779bna_ethport_cb_rx_started(struct bna_ethport *ethport)
780{
781 ethport->rx_started_count++;
782
783 if (ethport->rx_started_count == 1) {
784 ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
785
786 if (ethport_can_be_up(ethport))
787 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
788 }
789}
790
791void
792bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
793{
794 int ethport_up = ethport_is_up(ethport);
795
796 ethport->rx_started_count--;
797
798 if (ethport->rx_started_count == 0) {
799 ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
800
801 if (ethport_up)
802 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
803 }
804}
805
806/**
807 * ENET
808 */
809#define bna_enet_chld_start(enet) \
810do { \
811 enum bna_tx_type tx_type = \
812 ((enet)->type == BNA_ENET_T_REGULAR) ? \
813 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
814 enum bna_rx_type rx_type = \
815 ((enet)->type == BNA_ENET_T_REGULAR) ? \
816 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
817 bna_ethport_start(&(enet)->bna->ethport); \
818 bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \
819 bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
820} while (0)
821
822#define bna_enet_chld_stop(enet) \
823do { \
824 enum bna_tx_type tx_type = \
825 ((enet)->type == BNA_ENET_T_REGULAR) ? \
826 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
827 enum bna_rx_type rx_type = \
828 ((enet)->type == BNA_ENET_T_REGULAR) ? \
829 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
830 bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
831 bfa_wc_up(&(enet)->chld_stop_wc); \
832 bna_ethport_stop(&(enet)->bna->ethport); \
833 bfa_wc_up(&(enet)->chld_stop_wc); \
834 bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \
835 bfa_wc_up(&(enet)->chld_stop_wc); \
836 bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
837 bfa_wc_wait(&(enet)->chld_stop_wc); \
838} while (0)
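/*
 * Editor's note on the wait-counter pattern above (based on the bfa_wc
 * helpers elsewhere in the driver): bfa_wc_init() registers
 * bna_enet_cb_chld_stopped() and takes an initial reference, each
 * bfa_wc_up() adds a reference for one child being stopped, each
 * child's stop completion drops one via bfa_wc_down(), and
 * bfa_wc_wait() drops the initial reference -- so the callback fires
 * exactly once, after all three children have reported stopped.
 */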
839
840#define bna_enet_chld_fail(enet) \
841do { \
842 bna_ethport_fail(&(enet)->bna->ethport); \
843 bna_tx_mod_fail(&(enet)->bna->tx_mod); \
844 bna_rx_mod_fail(&(enet)->bna->rx_mod); \
845} while (0)
846
847#define bna_enet_rx_start(enet) \
848do { \
849 enum bna_rx_type rx_type = \
850 ((enet)->type == BNA_ENET_T_REGULAR) ? \
851 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
852 bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
853} while (0)
854
855#define bna_enet_rx_stop(enet) \
856do { \
857 enum bna_rx_type rx_type = \
858 ((enet)->type == BNA_ENET_T_REGULAR) ? \
859 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
860 bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
861 bfa_wc_up(&(enet)->chld_stop_wc); \
862 bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
863 bfa_wc_wait(&(enet)->chld_stop_wc); \
864} while (0)
865
866#define call_enet_stop_cbfn(enet) \
867do { \
868 if ((enet)->stop_cbfn) { \
869 void (*cbfn)(void *); \
870 void *cbarg; \
871 cbfn = (enet)->stop_cbfn; \
872 cbarg = (enet)->stop_cbarg; \
873 (enet)->stop_cbfn = NULL; \
874 (enet)->stop_cbarg = NULL; \
875 cbfn(cbarg); \
876 } \
877} while (0)
878
879#define call_enet_pause_cbfn(enet) \
880do { \
881 if ((enet)->pause_cbfn) { \
882 void (*cbfn)(struct bnad *); \
883 cbfn = (enet)->pause_cbfn; \
884 (enet)->pause_cbfn = NULL; \
885 cbfn((enet)->bna->bnad); \
886 } \
887} while (0)
888
889#define call_enet_mtu_cbfn(enet) \
890do { \
891 if ((enet)->mtu_cbfn) { \
892 void (*cbfn)(struct bnad *); \
893 cbfn = (enet)->mtu_cbfn; \
894 (enet)->mtu_cbfn = NULL; \
895 cbfn((enet)->bna->bnad); \
896 } \
897} while (0)
898
899static void bna_enet_cb_chld_stopped(void *arg);
900static void bna_bfi_pause_set(struct bna_enet *enet);
901
902bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
903 enum bna_enet_event);
904bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
905 enum bna_enet_event);
906bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
907 enum bna_enet_event);
908bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
909 enum bna_enet_event);
910bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
911 enum bna_enet_event);
912bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
913 enum bna_enet_event);
914bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
915 enum bna_enet_event);
916
917static void
918bna_enet_sm_stopped_entry(struct bna_enet *enet)
919{
920 call_enet_pause_cbfn(enet);
921 call_enet_mtu_cbfn(enet);
922 call_enet_stop_cbfn(enet);
923}
924
925static void
926bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
927{
928 switch (event) {
929 case ENET_E_START:
930 bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
931 break;
932
933 case ENET_E_STOP:
934 call_enet_stop_cbfn(enet);
935 break;
936
937 case ENET_E_FAIL:
938 /* No-op */
939 break;
940
941 case ENET_E_PAUSE_CFG:
942 call_enet_pause_cbfn(enet);
943 break;
944
945 case ENET_E_MTU_CFG:
946 call_enet_mtu_cbfn(enet);
947 break;
948
949 case ENET_E_CHLD_STOPPED:
950 /**
951 * This event is received due to Ethport, Tx and Rx objects
952 * failing
953 */
954 /* No-op */
955 break;
956
957 default:
958 bfa_sm_fault(event);
959 }
960}
961
962static void
963bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
964{
965 bna_bfi_pause_set(enet);
966}
967
968static void
969bna_enet_sm_pause_init_wait(struct bna_enet *enet,
970 enum bna_enet_event event)
971{
972 switch (event) {
973 case ENET_E_STOP:
974 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
975 bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
976 break;
977
978 case ENET_E_FAIL:
979 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
980 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
981 break;
982
983 case ENET_E_PAUSE_CFG:
984 enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
985 break;
986
987 case ENET_E_MTU_CFG:
988 /* No-op */
989 break;
990
991 case ENET_E_FWRESP_PAUSE:
992 if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
993 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
994 bna_bfi_pause_set(enet);
995 } else {
996 bfa_fsm_set_state(enet, bna_enet_sm_started);
997 bna_enet_chld_start(enet);
998 }
999 break;
1000
1001 default:
1002 bfa_sm_fault(event);
1003 }
1004}
1005
1006static void
1007bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
1008{
1009 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1010}
1011
1012static void
1013bna_enet_sm_last_resp_wait(struct bna_enet *enet,
1014 enum bna_enet_event event)
1015{
1016 switch (event) {
1017 case ENET_E_FAIL:
1018 case ENET_E_FWRESP_PAUSE:
1019 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1020 break;
1021
1022 default:
1023 bfa_sm_fault(event);
1024 }
1025}
1026
1027static void
1028bna_enet_sm_started_entry(struct bna_enet *enet)
1029{
1030 /**
1031 * NOTE: Do not call bna_enet_chld_start() here, since it will be
1032 * inadvertently called during cfg_wait->started transition as well
1033 */
1034 call_enet_pause_cbfn(enet);
1035 call_enet_mtu_cbfn(enet);
1036}
1037
1038static void
1039bna_enet_sm_started(struct bna_enet *enet,
1040 enum bna_enet_event event)
1041{
1042 switch (event) {
1043 case ENET_E_STOP:
1044 bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
1045 break;
1046
1047 case ENET_E_FAIL:
1048 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1049 bna_enet_chld_fail(enet);
1050 break;
1051
1052 case ENET_E_PAUSE_CFG:
1053 bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
1054 bna_bfi_pause_set(enet);
1055 break;
1056
1057 case ENET_E_MTU_CFG:
1058 bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
1059 bna_enet_rx_stop(enet);
1060 break;
1061
1062 default:
1063 bfa_sm_fault(event);
1064 }
1065}
1066
1067static void
1068bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
1069{
1070}
1071
1072static void
1073bna_enet_sm_cfg_wait(struct bna_enet *enet,
1074 enum bna_enet_event event)
1075{
1076 switch (event) {
1077 case ENET_E_STOP:
1078 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1079 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1080 bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
1081 break;
1082
1083 case ENET_E_FAIL:
1084 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1085 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1086 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1087 bna_enet_chld_fail(enet);
1088 break;
1089
1090 case ENET_E_PAUSE_CFG:
1091 enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
1092 break;
1093
1094 case ENET_E_MTU_CFG:
1095 enet->flags |= BNA_ENET_F_MTU_CHANGED;
1096 break;
1097
1098 case ENET_E_CHLD_STOPPED:
1099 bna_enet_rx_start(enet);
1100 /* Fall through */
1101 case ENET_E_FWRESP_PAUSE:
1102 if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
1103 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1104 bna_bfi_pause_set(enet);
1105 } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
1106 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1107 bna_enet_rx_stop(enet);
1108 } else {
1109 bfa_fsm_set_state(enet, bna_enet_sm_started);
1110 }
1111 break;
1112
1113 default:
1114 bfa_sm_fault(event);
1115 }
1116}
1117
1118static void
1119bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
1120{
1121 enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1122 enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1123}
1124
1125static void
1126bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
1127 enum bna_enet_event event)
1128{
1129 switch (event) {
1130 case ENET_E_FAIL:
1131 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1132 bna_enet_chld_fail(enet);
1133 break;
1134
1135 case ENET_E_FWRESP_PAUSE:
1136 case ENET_E_CHLD_STOPPED:
1137 bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
1138 break;
1139
1140 default:
1141 bfa_sm_fault(event);
1142 }
1143}
1144
1145static void
1146bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
1147{
1148 bna_enet_chld_stop(enet);
1149}
1150
1151static void
1152bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
1153 enum bna_enet_event event)
1154{
1155 switch (event) {
1156 case ENET_E_FAIL:
1157 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1158 bna_enet_chld_fail(enet);
1159 break;
1160
1161 case ENET_E_CHLD_STOPPED:
1162 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1163 break;
1164
1165 default:
1166 bfa_sm_fault(event);
1167 }
1168}
1169
1170static void
1171bna_bfi_pause_set(struct bna_enet *enet)
1172{
1173 struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;
1174
1175 bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
1176 BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
1177 pause_req->mh.num_entries = htons(
1178 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
1179 pause_req->tx_pause = enet->pause_config.tx_pause;
1180 pause_req->rx_pause = enet->pause_config.rx_pause;
1181
1182 bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
1183 sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
1184 bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
1185}
1186
1187static void
1188bna_enet_cb_chld_stopped(void *arg)
1189{
1190 struct bna_enet *enet = (struct bna_enet *)arg;
1191
1192 bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1193}
1194
1195static void
1196bna_enet_init(struct bna_enet *enet, struct bna *bna)
1197{
1198 enet->bna = bna;
1199 enet->flags = 0;
1200 enet->mtu = 0;
1201 enet->type = BNA_ENET_T_REGULAR;
1202
1203 enet->stop_cbfn = NULL;
1204 enet->stop_cbarg = NULL;
1205
1206 enet->pause_cbfn = NULL;
1207
1208 enet->mtu_cbfn = NULL;
1209
1210 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1211}
1212
1213static void
1214bna_enet_uninit(struct bna_enet *enet)
1215{
1216 enet->flags = 0;
1217
1218 enet->bna = NULL;
1219}
1220
1221static void
1222bna_enet_start(struct bna_enet *enet)
1223{
1224 enet->flags |= BNA_ENET_F_IOCETH_READY;
1225 if (enet->flags & BNA_ENET_F_ENABLED)
1226 bfa_fsm_send_event(enet, ENET_E_START);
1227}
1228
1229static void
1230bna_ioceth_cb_enet_stopped(void *arg)
1231{
1232 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1233
1234 bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1235}
1236
1237static void
1238bna_enet_stop(struct bna_enet *enet)
1239{
1240 enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
1241 enet->stop_cbarg = &enet->bna->ioceth;
1242
1243 enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1244 bfa_fsm_send_event(enet, ENET_E_STOP);
1245}
1246
1247static void
1248bna_enet_fail(struct bna_enet *enet)
1249{
1250 enet->flags &= ~BNA_ENET_F_IOCETH_READY;
1251 bfa_fsm_send_event(enet, ENET_E_FAIL);
1252}
1253
1254void
1255bna_enet_cb_tx_stopped(struct bna_enet *enet)
1256{
1257 bfa_wc_down(&enet->chld_stop_wc);
1258}
1259
1260void
1261bna_enet_cb_rx_stopped(struct bna_enet *enet)
1262{
1263 bfa_wc_down(&enet->chld_stop_wc);
1264}
1265
1266int
1267bna_enet_mtu_get(struct bna_enet *enet)
1268{
1269 return enet->mtu;
1270}
1271
1272void
1273bna_enet_enable(struct bna_enet *enet)
1274{
1275 if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
1276 return;
1277
1278 enet->flags |= BNA_ENET_F_ENABLED;
1279
1280 if (enet->flags & BNA_ENET_F_IOCETH_READY)
1281 bfa_fsm_send_event(enet, ENET_E_START);
1282}
1283
1284void
1285bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
1286 void (*cbfn)(void *))
1287{
1288 if (type == BNA_SOFT_CLEANUP) {
1289 (*cbfn)(enet->bna->bnad);
1290 return;
1291 }
1292
1293 enet->stop_cbfn = cbfn;
1294 enet->stop_cbarg = enet->bna->bnad;
1295
1296 enet->flags &= ~BNA_ENET_F_ENABLED;
1297
1298 bfa_fsm_send_event(enet, ENET_E_STOP);
1299}
1300
1301void
1302bna_enet_pause_config(struct bna_enet *enet,
1303 struct bna_pause_config *pause_config,
1304 void (*cbfn)(struct bnad *))
1305{
1306 enet->pause_config = *pause_config;
1307
1308 enet->pause_cbfn = cbfn;
1309
1310 bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
1311}
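/*
 * A hypothetical bnad-side caller, for illustration only (the callback
 * name is assumed, not defined in this commit):
 */
#if 0	/* illustration only */
static void ex_cb_enet_paused(struct bnad *bnad)
{
	/* FW acknowledged the pause change (ENET_E_FWRESP_PAUSE) */
}

static void ex_set_pause(struct bna *bna)
{
	struct bna_pause_config pc = { .tx_pause = 1, .rx_pause = 1 };

	bna_enet_pause_config(&bna->enet, &pc, ex_cb_enet_paused);
}
#endif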
1312
1313void
1314bna_enet_mtu_set(struct bna_enet *enet, int mtu,
1315 void (*cbfn)(struct bnad *))
1316{
1317 enet->mtu = mtu;
1318
1319 enet->mtu_cbfn = cbfn;
1320
1321 bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
1322}
1323
1324void
1325bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
1326{
1327 *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
1328}
1329
1330/**
1331 * IOCETH
1332 */
1333#define enable_mbox_intr(_ioceth) \
1334do { \
1335 u32 intr_status; \
1336 bna_intr_status_get((_ioceth)->bna, intr_status); \
1337 bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad); \
1338 bna_mbox_intr_enable((_ioceth)->bna); \
1339} while (0)
1340
1341#define disable_mbox_intr(_ioceth) \
1342do { \
1343 bna_mbox_intr_disable((_ioceth)->bna); \
1344 bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad); \
1345} while (0)
1346
1347#define call_ioceth_stop_cbfn(_ioceth) \
1348do { \
1349 if ((_ioceth)->stop_cbfn) { \
1350 void (*cbfn)(struct bnad *); \
1351 struct bnad *cbarg; \
1352 cbfn = (_ioceth)->stop_cbfn; \
1353 cbarg = (_ioceth)->stop_cbarg; \
1354 (_ioceth)->stop_cbfn = NULL; \
1355 (_ioceth)->stop_cbarg = NULL; \
1356 cbfn(cbarg); \
1357 } \
1358} while (0)
1359
1360#define bna_stats_mod_uninit(_stats_mod) \
1361do { \
1362} while (0)
1363
1364#define bna_stats_mod_start(_stats_mod) \
1365do { \
1366 (_stats_mod)->ioc_ready = true; \
1367} while (0)
1368
1369#define bna_stats_mod_stop(_stats_mod) \
1370do { \
1371 (_stats_mod)->ioc_ready = false; \
1372} while (0)
1373
1374#define bna_stats_mod_fail(_stats_mod) \
1375do { \
1376 (_stats_mod)->ioc_ready = false; \
1377 (_stats_mod)->stats_get_busy = false; \
1378 (_stats_mod)->stats_clr_busy = false; \
1379} while (0)
1380
1381static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
1382
1383bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
1384 enum bna_ioceth_event);
1385bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
1386 enum bna_ioceth_event);
1387bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
1388 enum bna_ioceth_event);
1389bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
1390 enum bna_ioceth_event);
1391bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
1392 enum bna_ioceth_event);
1393bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
1394 enum bna_ioceth_event);
1395bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
1396 enum bna_ioceth_event);
1397bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
1398 enum bna_ioceth_event);
1399
1400static void
1401bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
1402{
1403 call_ioceth_stop_cbfn(ioceth);
1404}
1405
1406static void
1407bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
1408 enum bna_ioceth_event event)
1409{
1410 switch (event) {
1411 case IOCETH_E_ENABLE:
1412 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
1413 bfa_nw_ioc_enable(&ioceth->ioc);
1414 break;
1415
1416 case IOCETH_E_DISABLE:
1417 bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1418 break;
1419
1420 case IOCETH_E_IOC_RESET:
1421 enable_mbox_intr(ioceth);
1422 break;
1423
1424 case IOCETH_E_IOC_FAILED:
1425 disable_mbox_intr(ioceth);
1426 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1427 break;
1428
1429 default:
1430 bfa_sm_fault(event);
1431 }
1432}
1433
1434static void
1435bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
1436{
1437 /**
1438 * Do not call bfa_nw_ioc_enable() here. It must be called in the
1439 	 * previous state because of the failed -> ioc_ready_wait transition.
1440 */
1441}
1442
1443static void
1444bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
1445 enum bna_ioceth_event event)
1446{
1447 switch (event) {
1448 case IOCETH_E_DISABLE:
1449 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1450 bfa_nw_ioc_disable(&ioceth->ioc);
1451 break;
1452
1453 case IOCETH_E_IOC_RESET:
1454 enable_mbox_intr(ioceth);
1455 break;
1456
1457 case IOCETH_E_IOC_FAILED:
1458 disable_mbox_intr(ioceth);
1459 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1460 break;
1461
1462 case IOCETH_E_IOC_READY:
1463 bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
1464 break;
1465
1466 default:
1467 bfa_sm_fault(event);
1468 }
1469}
1470
1471static void
1472bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
1473{
1474 bna_bfi_attr_get(ioceth);
1475}
1476
1477static void
1478bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
1479 enum bna_ioceth_event event)
1480{
1481 switch (event) {
1482 case IOCETH_E_DISABLE:
1483 bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
1484 break;
1485
1486 case IOCETH_E_IOC_FAILED:
1487 disable_mbox_intr(ioceth);
1488 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1489 break;
1490
1491 case IOCETH_E_ENET_ATTR_RESP:
1492 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
1493 break;
1494
1495 default:
1496 bfa_sm_fault(event);
1497 }
1498}
1499
1500static void
1501bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
1502{
1503 bna_enet_start(&ioceth->bna->enet);
1504 bna_stats_mod_start(&ioceth->bna->stats_mod);
1505 bnad_cb_ioceth_ready(ioceth->bna->bnad);
1506}
1507
1508static void
1509bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
1510{
1511 switch (event) {
1512 case IOCETH_E_DISABLE:
1513 bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
1514 break;
1515
1516 case IOCETH_E_IOC_FAILED:
1517 disable_mbox_intr(ioceth);
1518 bna_enet_fail(&ioceth->bna->enet);
1519 bna_stats_mod_fail(&ioceth->bna->stats_mod);
1520 bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
1521 break;
1522
1523 default:
1524 bfa_sm_fault(event);
1525 }
1526}
1527
1528static void
1529bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
1530{
1531}
1532
1533static void
1534bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
1535 enum bna_ioceth_event event)
1536{
1537 switch (event) {
1538 case IOCETH_E_IOC_FAILED:
1539 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1540 disable_mbox_intr(ioceth);
1541 bfa_nw_ioc_disable(&ioceth->ioc);
1542 break;
1543
1544 case IOCETH_E_ENET_ATTR_RESP:
1545 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1546 bfa_nw_ioc_disable(&ioceth->ioc);
1547 break;
1548
1549 default:
1550 bfa_sm_fault(event);
1551 }
1552}
1553
1554static void
1555bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
1556{
1557 bna_stats_mod_stop(&ioceth->bna->stats_mod);
1558 bna_enet_stop(&ioceth->bna->enet);
1559}
1560
1561static void
1562bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
1563 enum bna_ioceth_event event)
1564{
1565 switch (event) {
1566 case IOCETH_E_IOC_FAILED:
1567 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1568 disable_mbox_intr(ioceth);
1569 bna_enet_fail(&ioceth->bna->enet);
1570 bna_stats_mod_fail(&ioceth->bna->stats_mod);
1571 bfa_nw_ioc_disable(&ioceth->ioc);
1572 break;
1573
1574 case IOCETH_E_ENET_STOPPED:
1575 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1576 bfa_nw_ioc_disable(&ioceth->ioc);
1577 break;
1578
1579 default:
1580 bfa_sm_fault(event);
1581 }
1582}
1583
1584static void
1585bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
1586{
1587}
1588
1589static void
1590bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
1591 enum bna_ioceth_event event)
1592{
1593 switch (event) {
1594 case IOCETH_E_IOC_DISABLED:
1595 disable_mbox_intr(ioceth);
1596 bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1597 break;
1598
1599 case IOCETH_E_ENET_STOPPED:
1600 /* This event is received due to enet failing */
1601 /* No-op */
1602 break;
1603
1604 default:
1605 bfa_sm_fault(event);
1606 }
1607}
1608
1609static void
1610bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
1611{
1612 bnad_cb_ioceth_failed(ioceth->bna->bnad);
1613}
1614
1615static void
1616bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
1617 enum bna_ioceth_event event)
1618{
1619 switch (event) {
1620 case IOCETH_E_DISABLE:
1621 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
1622 bfa_nw_ioc_disable(&ioceth->ioc);
1623 break;
1624
1625 case IOCETH_E_IOC_RESET:
1626 enable_mbox_intr(ioceth);
1627 bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
1628 break;
1629
1630 case IOCETH_E_IOC_FAILED:
1631 break;
1632
1633 default:
1634 bfa_sm_fault(event);
1635 }
1636}
1637
1638static void
1639bna_bfi_attr_get(struct bna_ioceth *ioceth)
1640{
1641 struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;
1642
1643 bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
1644 BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
1645 attr_req->mh.num_entries = htons(
1646 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
1647 bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
1648 sizeof(struct bfi_enet_attr_req), &attr_req->mh);
1649 bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
1650}
1651
1652/* IOC callback functions */
1653
1654static void
1655bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1656{
1657 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1658
1659 if (error)
1660 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1661 else
1662 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1663}
1664
1665static void
1666bna_cb_ioceth_disable(void *arg)
1667{
1668 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1669
1670 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1671}
1672
1673static void
1674bna_cb_ioceth_hbfail(void *arg)
1675{
1676 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1677
1678 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1679}
1680
1681static void
1682bna_cb_ioceth_reset(void *arg)
1683{
1684 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1685
1686 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1687}
1688
1689static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
1690 bna_cb_ioceth_enable,
1691 bna_cb_ioceth_disable,
1692 bna_cb_ioceth_hbfail,
1693 bna_cb_ioceth_reset
1694};
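/*
 * The table above relies on positional initialization. Assuming the
 * bfa_ioc_cbfn member names from bfa_ioc.h (enable_cbfn, disable_cbfn,
 * hbfail_cbfn, reset_cbfn), an equivalent order-independent form is:
 *
 *	static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
 *		.enable_cbfn	= bna_cb_ioceth_enable,
 *		.disable_cbfn	= bna_cb_ioceth_disable,
 *		.hbfail_cbfn	= bna_cb_ioceth_hbfail,
 *		.reset_cbfn	= bna_cb_ioceth_reset,
 *	};
 */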
1695
1696static void
1697bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
1698 struct bna_res_info *res_info)
1699{
1700 u64 dma;
1701 u8 *kva;
1702
1703 ioceth->bna = bna;
1704
1705 /**
1706 * Attach IOC and claim:
1707 * 1. DMA memory for IOC attributes
1708 * 2. Kernel memory for FW trace
1709 */
1710 bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
1711 bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);
1712
1713 BNA_GET_DMA_ADDR(
1714 &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
1715 kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
1716 bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
1717
1718 kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1719
1720 /**
1721 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1722 * DMA memory.
1723 */
1724 BNA_GET_DMA_ADDR(
1725 &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1726 kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1727 bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
1728 bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1729 kva += bfa_nw_cee_meminfo();
1730 dma += bfa_nw_cee_meminfo();
1731
1732 bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
1733 bfa_msgq_memclaim(&bna->msgq, kva, dma);
1734 bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
1735 kva += bfa_msgq_meminfo();
1736 dma += bfa_msgq_meminfo();
1737
1738 ioceth->stop_cbfn = NULL;
1739 ioceth->stop_cbarg = NULL;
1740
1741 bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
1742}
1743
1744static void
1745bna_ioceth_uninit(struct bna_ioceth *ioceth)
1746{
1747 bfa_nw_ioc_detach(&ioceth->ioc);
1748
1749 ioceth->bna = NULL;
1750}
1751
1752void
1753bna_ioceth_enable(struct bna_ioceth *ioceth)
1754{
1755 if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
1756 bnad_cb_ioceth_ready(ioceth->bna->bnad);
1757 return;
1758 }
1759
1760 if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
1761 bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
1762}
1763
1764void
1765bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
1766{
1767 if (type == BNA_SOFT_CLEANUP) {
1768 bnad_cb_ioceth_disabled(ioceth->bna->bnad);
1769 return;
1770 }
1771
1772 ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
1773 ioceth->stop_cbarg = ioceth->bna->bnad;
1774
1775 bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
1776}
1777
1778static void
1779bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1780 struct bna_res_info *res_info)
1781{
1782 int i;
1783
1784 ucam_mod->ucmac = (struct bna_mac *)
1785 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1786
1787 INIT_LIST_HEAD(&ucam_mod->free_q);
1788 for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
1789 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1790 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1791 }
1792
1793 ucam_mod->bna = bna;
1794}
1795
1796static void
1797bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1798{
1799 struct list_head *qe;
1800 int i = 0;
1801
1802 list_for_each(qe, &ucam_mod->free_q)
1803 i++;
1804
1805 ucam_mod->bna = NULL;
1806}
1807
1808static void
1809bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1810 struct bna_res_info *res_info)
1811{
1812 int i;
1813
1814 mcam_mod->mcmac = (struct bna_mac *)
1815 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1816
1817 INIT_LIST_HEAD(&mcam_mod->free_q);
1818 for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1819 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1820 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1821 }
1822
1823 mcam_mod->mchandle = (struct bna_mcam_handle *)
1824 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1825
1826 INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1827 for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1828 bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
1829 list_add_tail(&mcam_mod->mchandle[i].qe,
1830 &mcam_mod->free_handle_q);
1831 }
1832
1833 mcam_mod->bna = bna;
1834}
1835
1836static void
1837bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1838{
1839 struct list_head *qe;
1840 int i;
1841
1842 i = 0;
1843 list_for_each(qe, &mcam_mod->free_q) i++;
1844
1845 i = 0;
1846 list_for_each(qe, &mcam_mod->free_handle_q) i++;
1847
1848 mcam_mod->bna = NULL;
1849}
1850
1851static void
1852bna_bfi_stats_get(struct bna *bna)
1853{
1854 struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
1855
1856 bna->stats_mod.stats_get_busy = true;
1857
1858 bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
1859 BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
1860 stats_req->mh.num_entries = htons(
1861 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
1862 stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
1863 stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
1864 stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
1865 stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
1866 stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;
1867
1868 bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
1869 sizeof(struct bfi_enet_stats_req), &stats_req->mh);
1870 bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
1871}
1872
1873void
1874bna_res_req(struct bna_res_info *res_info)
1875{
1876 /* DMA memory for COMMON_MODULE */
1877 res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
1878 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1879 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
1880 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
1881 (bfa_nw_cee_meminfo() +
1882 bfa_msgq_meminfo()), PAGE_SIZE);
1883
1884 /* DMA memory for retrieving IOC attributes */
1885 res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
1886 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1887 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
1888 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
1889 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
1890
1891 	/* Virtual memory for retrieving fw_trc */
1892 res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
1893 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1894 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
1895 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
1896
1897 	/* DMA memory for retrieving stats */
1898 res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
1899 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1900 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
1901 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
1902 ALIGN(sizeof(struct bfi_enet_stats),
1903 PAGE_SIZE);
1904}
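/*
 * Simplified sketch of the caller-side allocation this table drives
 * (hypothetical helper, illustration only; the real allocator lives in
 * bnad and also handles multi-mdl entries and the bna_dma_addr
 * msb/lsb split):
 */
#if 0	/* illustration only */
static int ex_alloc_res(struct device *dev, struct bna_res_info *ri)
{
	int i;

	for (i = 0; i < BNA_RES_T_MAX; i++) {
		struct bna_mem_info *mi = &ri[i].res_u.mem_info;

		if (ri[i].res_type != BNA_RES_T_MEM || !mi->num)
			continue;
		if (mi->mem_type == BNA_MEM_T_DMA) {
			dma_addr_t dma;

			mi->mdl[0].kva = dma_alloc_coherent(dev, mi->len,
							    &dma, GFP_KERNEL);
			/* split dma into mdl[0].dma.msb/.lsb here */
		} else {
			mi->mdl[0].kva = kzalloc(mi->len, GFP_KERNEL);
		}
		if (!mi->mdl[0].kva)
			return -ENOMEM;
		mi->mdl[0].len = mi->len;
	}
	return 0;
}
#endif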
1905
1906void
1907bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
1908{
1909 struct bna_attr *attr = &bna->ioceth.attr;
1910
1911 /* Virtual memory for Tx objects - stored by Tx module */
1912 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
1913 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
1914 BNA_MEM_T_KVA;
1915 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
1916 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
1917 attr->num_txq * sizeof(struct bna_tx);
1918
1919 /* Virtual memory for TxQ - stored by Tx module */
1920 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
1921 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
1922 BNA_MEM_T_KVA;
1923 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
1924 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
1925 attr->num_txq * sizeof(struct bna_txq);
1926
1927 /* Virtual memory for Rx objects - stored by Rx module */
1928 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
1929 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
1930 BNA_MEM_T_KVA;
1931 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
1932 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
1933 attr->num_rxp * sizeof(struct bna_rx);
1934
1935 /* Virtual memory for RxPath - stored by Rx module */
1936 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
1937 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
1938 BNA_MEM_T_KVA;
1939 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
1940 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
1941 attr->num_rxp * sizeof(struct bna_rxp);
1942
1943 /* Virtual memory for RxQ - stored by Rx module */
1944 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
1945 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
1946 BNA_MEM_T_KVA;
1947 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
1948 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
1949 (attr->num_rxp * 2) * sizeof(struct bna_rxq);
1950
1951 /* Virtual memory for Unicast MAC address - stored by ucam module */
1952 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
1953 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
1954 BNA_MEM_T_KVA;
1955 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
1956 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
1957 attr->num_ucmac * sizeof(struct bna_mac);
1958
1959 /* Virtual memory for Multicast MAC address - stored by mcam module */
1960 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
1961 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
1962 BNA_MEM_T_KVA;
1963 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
1964 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
1965 attr->num_mcmac * sizeof(struct bna_mac);
1966
1967 /* Virtual memory for Multicast handle - stored by mcam module */
1968 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
1969 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
1970 BNA_MEM_T_KVA;
1971 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
1972 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
1973 attr->num_mcmac * sizeof(struct bna_mcam_handle);
1974}
1975
1976void
1977bna_init(struct bna *bna, struct bnad *bnad,
1978 struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
1979{
1980 bna->bnad = bnad;
1981 bna->pcidev = *pcidev;
1982
1983 bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
1984 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
1985 bna->stats.hw_stats_dma.msb =
1986 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
1987 bna->stats.hw_stats_dma.lsb =
1988 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
1989
1990 bna_reg_addr_init(bna, &bna->pcidev);
1991
1992 /* Also initializes diag, cee, sfp, phy_port, msgq */
1993 bna_ioceth_init(&bna->ioceth, bna, res_info);
1994
1995 bna_enet_init(&bna->enet, bna);
1996 bna_ethport_init(&bna->ethport, bna);
1997}
1998
1999void
2000bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
2001{
2002 bna_tx_mod_init(&bna->tx_mod, bna, res_info);
2003
2004 bna_rx_mod_init(&bna->rx_mod, bna, res_info);
2005
2006 bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
2007
2008 bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
2009
2010 bna->default_mode_rid = BFI_INVALID_RID;
2011 bna->promisc_rid = BFI_INVALID_RID;
2012
2013 bna->mod_flags |= BNA_MOD_F_INIT_DONE;
2014}
2015
2016void
2017bna_uninit(struct bna *bna)
2018{
2019 if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
2020 bna_mcam_mod_uninit(&bna->mcam_mod);
2021 bna_ucam_mod_uninit(&bna->ucam_mod);
2022 bna_rx_mod_uninit(&bna->rx_mod);
2023 bna_tx_mod_uninit(&bna->tx_mod);
2024 bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
2025 }
2026
2027 bna_stats_mod_uninit(&bna->stats_mod);
2028 bna_ethport_uninit(&bna->ethport);
2029 bna_enet_uninit(&bna->enet);
2030
2031 bna_ioceth_uninit(&bna->ioceth);
2032
2033 bna->bnad = NULL;
2034}
2035
2036int
2037bna_num_txq_set(struct bna *bna, int num_txq)
2038{
2039 if (num_txq > 0 && (num_txq <= bna->ioceth.attr.num_txq)) {
2040 bna->ioceth.attr.num_txq = num_txq;
2041 return BNA_CB_SUCCESS;
2042 }
2043
2044 return BNA_CB_FAIL;
2045}
2046
2047int
2048bna_num_rxp_set(struct bna *bna, int num_rxp)
2049{
2050 if (num_rxp > 0 && (num_rxp <= bna->ioceth.attr.num_rxp)) {
2051 bna->ioceth.attr.num_rxp = num_rxp;
2052 return BNA_CB_SUCCESS;
2053 }
2054
2055 return BNA_CB_FAIL;
2056}
2057
2058struct bna_mac *
2059bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
2060{
2061 struct list_head *qe;
2062
2063 if (list_empty(&ucam_mod->free_q))
2064 return NULL;
2065
2066 bfa_q_deq(&ucam_mod->free_q, &qe);
2067
2068 return (struct bna_mac *)qe;
2069}
2070
2071void
2072bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
2073{
2074 list_add_tail(&mac->qe, &ucam_mod->free_q);
2075}
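/*
 * Typical consumer pattern for the CAM free lists above (hypothetical
 * caller, illustration only; address programming is elided):
 */
#if 0	/* illustration only */
static int ex_claim_ucmac(struct bna *bna)
{
	struct bna_mac *mac = bna_ucam_mod_mac_get(&bna->ucam_mod);

	if (!mac)
		return -ENOMEM;	/* all attr.num_ucmac entries in use */
	/* fill in the MAC address and post the FW request here */
	bna_ucam_mod_mac_put(&bna->ucam_mod, mac);
	return 0;
}
#endif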
2076
2077struct bna_mac *
2078bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
2079{
2080 struct list_head *qe;
2081
2082 if (list_empty(&mcam_mod->free_q))
2083 return NULL;
2084
2085 bfa_q_deq(&mcam_mod->free_q, &qe);
2086
2087 return (struct bna_mac *)qe;
2088}
2089
2090void
2091bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
2092{
2093 list_add_tail(&mac->qe, &mcam_mod->free_q);
2094}
2095
2096struct bna_mcam_handle *
2097bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2098{
2099 struct list_head *qe;
2100
2101 if (list_empty(&mcam_mod->free_handle_q))
2102 return NULL;
2103
2104 bfa_q_deq(&mcam_mod->free_handle_q, &qe);
2105
2106 return (struct bna_mcam_handle *)qe;
2107}
2108
2109void
2110bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
2111 struct bna_mcam_handle *handle)
2112{
2113 list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
2114}
2115
2116void
2117bna_hw_stats_get(struct bna *bna)
2118{
2119 if (!bna->stats_mod.ioc_ready) {
2120 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2121 return;
2122 }
2123 if (bna->stats_mod.stats_get_busy) {
2124 bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2125 return;
2126 }
2127
2128 bna_bfi_stats_get(bna);
2129}
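/*
 * Sketch of the matching response side, for illustration only (the
 * actual handler is reached via bna_msgq_rsp_handler()'s dispatch): on
 * the stats-get response the busy flag is cleared and the same bnad
 * callback fires with BNA_CB_SUCCESS.
 */
#if 0	/* illustration only */
static void ex_stats_get_rsp(struct bna *bna)
{
	bna->stats_mod.stats_get_busy = false;
	/* hw_stats_kva now holds FW counters (big-endian); swap, then: */
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
#endif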