Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/Kconfig                          |    1
-rw-r--r--  drivers/net/ethernet/Makefile                         |    1
-rw-r--r--  drivers/net/ethernet/brocade/Kconfig                  |   22
-rw-r--r--  drivers/net/ethernet/brocade/Makefile                 |    5
-rw-r--r--  drivers/net/ethernet/brocade/bna/Kconfig              |   17
-rw-r--r--  drivers/net/ethernet/brocade/bna/Makefile             |   11
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_cee.c            |  304
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_cee.h            |   63
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_cs.h             |  140
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_defs.h           |  246
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_defs_cna.h       |  223
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h  |  222
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_defs_status.h    |  216
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c            | 2326
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.h            |  315
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c         |  512
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi.h                |  400
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi_cna.h            |  199
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi_ll.h             |  438
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi_reg.h            |  452
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna.h                |  548
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_ctrl.c           | 3076
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_hw.h             | 1492
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_txrx.c           | 4185
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h          | 1127
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c               | 3266
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h               |  341
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c       |  895
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna.h                |   80
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna_fwimg.c          |   64
30 files changed, 21187 insertions, 0 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9c003f363a9d..225918df224d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -15,6 +15,7 @@ source "drivers/net/ethernet/3com/Kconfig"
15source "drivers/net/ethernet/8390/Kconfig" 15source "drivers/net/ethernet/8390/Kconfig"
16source "drivers/net/ethernet/amd/Kconfig" 16source "drivers/net/ethernet/amd/Kconfig"
17source "drivers/net/ethernet/broadcom/Kconfig" 17source "drivers/net/ethernet/broadcom/Kconfig"
18source "drivers/net/ethernet/brocade/Kconfig"
18source "drivers/net/ethernet/chelsio/Kconfig" 19source "drivers/net/ethernet/chelsio/Kconfig"
19source "drivers/net/ethernet/emulex/Kconfig" 20source "drivers/net/ethernet/emulex/Kconfig"
20source "drivers/net/ethernet/intel/Kconfig" 21source "drivers/net/ethernet/intel/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 2ac05bacab8f..734f7c9d6649 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
 obj-$(CONFIG_NET_VENDOR_8390) += 8390/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
+obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig
new file mode 100644
index 000000000000..03f0b17b87c3
--- /dev/null
+++ b/drivers/net/ethernet/brocade/Kconfig
@@ -0,0 +1,22 @@
1#
2# Brocade device configuration
3#
4
5config NET_VENDOR_BROCADE
6 bool "Brocade devices"
7 depends on PCI
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y
10 and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about Brocade cards. If you say Y, you will be asked for
16 your specific card in the following questions.
17
18if NET_VENDOR_BROCADE
19
20source "drivers/net/ethernet/brocade/bna/Kconfig"
21
22endif # NET_VENDOR_BROCADE
diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile
new file mode 100644
index 000000000000..b58238d2df6a
--- /dev/null
+++ b/drivers/net/ethernet/brocade/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Brocade device drivers.
3#
4
5obj-$(CONFIG_BNA) += bna/
diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig
new file mode 100644
index 000000000000..dc2eb526fbf7
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/Kconfig
@@ -0,0 +1,17 @@
1#
2# Brocade network device configuration
3#
4
5config BNA
6 tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
7 depends on PCI
8 ---help---
9 This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
10 cards.
11 To compile this driver as a module, choose M here: the module
12 will be called bna.
13
14 For general information and support, go to the Brocade support
15 website at:
16
17 <http://support.brocade.com>
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
new file mode 100644
index 000000000000..a5d604de7fea
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -0,0 +1,11 @@
1#
2# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3# All rights reserved.
4#
5
6obj-$(CONFIG_BNA) += bna.o
7
8bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
9bna-objs += bfa_ioc.o bfa_ioc_ct.o bfa_cee.o cna_fwimg.o
10
11EXTRA_CFLAGS := -Idrivers/net/ethernet/brocade/bna
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
new file mode 100644
index 000000000000..39e5ab9fde59
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -0,0 +1,304 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_defs_cna.h"
20#include "cna.h"
21#include "bfa_cee.h"
22#include "bfi_cna.h"
23#include "bfa_ioc.h"
24
25#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
26#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
27
28static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
29static void bfa_cee_format_cee_cfg(void *buffer);
30
31static void
32bfa_cee_format_cee_cfg(void *buffer)
33{
34 struct bfa_cee_attr *cee_cfg = buffer;
35 bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
36}
37
38static void
39bfa_cee_stats_swap(struct bfa_cee_stats *stats)
40{
41 u32 *buffer = (u32 *)stats;
42 int i;
43
44 for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32));
45 i++) {
46 buffer[i] = ntohl(buffer[i]);
47 }
48}
49
50static void
51bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
52{
53 lldp_cfg->time_to_live =
54 ntohs(lldp_cfg->time_to_live);
55 lldp_cfg->enabled_system_cap =
56 ntohs(lldp_cfg->enabled_system_cap);
57}
58
59/**
60 * bfa_cee_attr_meminfo()
61 *
62 * @brief Returns the size of the DMA memory needed by CEE attributes
63 *
64 * @param[in] void
65 *
66 * @return Size of DMA region
67 */
68static u32
69bfa_cee_attr_meminfo(void)
70{
71 return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
72}
73/**
74 * bfa_cee_stats_meminfo()
75 *
76 * @brief Returns the size of the DMA memory needed by CEE stats
77 *
78 * @param[in] void
79 *
80 * @return Size of DMA region
81 */
82static u32
83bfa_cee_stats_meminfo(void)
84{
85 return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ);
86}
87
88/**
89 * bfa_cee_get_attr_isr()
90 *
91 * @brief CEE ISR for get-attributes responses from f/w
92 *
93 * @param[in] cee - Pointer to the CEE module
94 * status - Return status from the f/w
95 *
96 * @return void
97 */
98static void
99bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
100{
101 cee->get_attr_status = status;
102 if (status == BFA_STATUS_OK) {
103 memcpy(cee->attr, cee->attr_dma.kva,
104 sizeof(struct bfa_cee_attr));
105 bfa_cee_format_cee_cfg(cee->attr);
106 }
107 cee->get_attr_pending = false;
108 if (cee->cbfn.get_attr_cbfn)
109 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
110}
111
112/**
113 * bfa_cee_get_stats_isr()
114 *
115 * @brief CEE ISR for get-stats responses from f/w
116 *
117 * @param[in] cee - Pointer to the CEE module
118 * status - Return status from the f/w
119 *
120 * @return void
121 */
122static void
123bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
124{
125 cee->get_stats_status = status;
126 if (status == BFA_STATUS_OK) {
127 memcpy(cee->stats, cee->stats_dma.kva,
128 sizeof(struct bfa_cee_stats));
129 bfa_cee_stats_swap(cee->stats);
130 }
131 cee->get_stats_pending = false;
132 if (cee->cbfn.get_stats_cbfn)
133 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
134}
135
136/**
137 * bfa_cee_reset_stats_isr()
138 *
139 * @brief CEE ISR for reset-stats responses from f/w
140 *
141 * @param[in] cee - Pointer to the CEE module
142 * status - Return status from the f/w
143 *
144 * @return void
145 */
146static void
147bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
148{
149 cee->reset_stats_status = status;
150 cee->reset_stats_pending = false;
151 if (cee->cbfn.reset_stats_cbfn)
152 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
153}
154/**
155 * bfa_nw_cee_meminfo()
156 *
157 * @brief Returns the size of the DMA memory needed by CEE module
158 *
159 * @param[in] void
160 *
161 * @return Size of DMA region
162 */
163u32
164bfa_nw_cee_meminfo(void)
165{
166 return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
167}
168
169/**
170 * bfa_nw_cee_mem_claim()
171 *
172 * @brief Initializes CEE DMA memory
173 *
174 * @param[in] cee CEE module pointer
175 * dma_kva Kernel Virtual Address of CEE DMA Memory
176 * dma_pa Physical Address of CEE DMA Memory
177 *
178 * @return void
179 */
180void
181bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
182{
183 cee->attr_dma.kva = dma_kva;
184 cee->attr_dma.pa = dma_pa;
185 cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
186 cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
187 cee->attr = (struct bfa_cee_attr *) dma_kva;
188 cee->stats = (struct bfa_cee_stats *)
189 (dma_kva + bfa_cee_attr_meminfo());
190}
191
192/**
193 * bfa_cee_isr()
194 *
195 * @brief Handles Mail-box interrupts for CEE module.
196 *
197 * @param[in] Pointer to the CEE module data structure.
198 *
199 * @return void
200 */
201
202static void
203bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
204{
205 union bfi_cee_i2h_msg_u *msg;
206 struct bfi_cee_get_rsp *get_rsp;
207 struct bfa_cee *cee = (struct bfa_cee *) cbarg;
208 msg = (union bfi_cee_i2h_msg_u *) m;
209 get_rsp = (struct bfi_cee_get_rsp *) m;
210 switch (msg->mh.msg_id) {
211 case BFI_CEE_I2H_GET_CFG_RSP:
212 bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
213 break;
214 case BFI_CEE_I2H_GET_STATS_RSP:
215 bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
216 break;
217 case BFI_CEE_I2H_RESET_STATS_RSP:
218 bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
219 break;
220 default:
221 BUG_ON(1);
222 }
223}
224
225/**
226 * bfa_cee_notify()
227 *
228 * @brief CEE module IOC event handler.
230 *
231 * @param[in] IOC event type
232 *
233 * @return void
234 */
235
236static void
237bfa_cee_notify(void *arg, enum bfa_ioc_event event)
238{
239 struct bfa_cee *cee;
240 cee = (struct bfa_cee *) arg;
241
242 switch (event) {
243 case BFA_IOC_E_DISABLED:
244 case BFA_IOC_E_FAILED:
245 if (cee->get_attr_pending == true) {
246 cee->get_attr_status = BFA_STATUS_FAILED;
247 cee->get_attr_pending = false;
248 if (cee->cbfn.get_attr_cbfn) {
249 cee->cbfn.get_attr_cbfn(
250 cee->cbfn.get_attr_cbarg,
251 BFA_STATUS_FAILED);
252 }
253 }
254 if (cee->get_stats_pending == true) {
255 cee->get_stats_status = BFA_STATUS_FAILED;
256 cee->get_stats_pending = false;
257 if (cee->cbfn.get_stats_cbfn) {
258 cee->cbfn.get_stats_cbfn(
259 cee->cbfn.get_stats_cbarg,
260 BFA_STATUS_FAILED);
261 }
262 }
263 if (cee->reset_stats_pending == true) {
264 cee->reset_stats_status = BFA_STATUS_FAILED;
265 cee->reset_stats_pending = false;
266 if (cee->cbfn.reset_stats_cbfn) {
267 cee->cbfn.reset_stats_cbfn(
268 cee->cbfn.reset_stats_cbarg,
269 BFA_STATUS_FAILED);
270 }
271 }
272 break;
273
274 default:
275 break;
276 }
277}
278
279/**
280 * bfa_nw_cee_attach()
281 *
282 * @brief CEE module-attach API
283 *
284 * @param[in] cee - Pointer to the CEE module data structure
285 * ioc - Pointer to the ioc module data structure
286 * dev - Pointer to the device driver module data structure
287 * The device driver specific mbox ISR functions have
288 * this pointer as one of the parameters.
289 *
290 * @return void
291 */
292void
293bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
294 void *dev)
295{
296 BUG_ON(cee == NULL);
297 cee->dev = dev;
298 cee->ioc = ioc;
299
300 bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
301 bfa_q_qe_init(&cee->ioc_notify);
302 bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
303 bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
304}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h
new file mode 100644
index 000000000000..58d54e98d595
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h
@@ -0,0 +1,63 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_CEE_H__
20#define __BFA_CEE_H__
21
22#include "bfa_defs_cna.h"
23#include "bfa_ioc.h"
24
25typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
26typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
27typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
28
29struct bfa_cee_cbfn {
30 bfa_cee_get_attr_cbfn_t get_attr_cbfn;
31 void *get_attr_cbarg;
32 bfa_cee_get_stats_cbfn_t get_stats_cbfn;
33 void *get_stats_cbarg;
34 bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
35 void *reset_stats_cbarg;
36};
37
38struct bfa_cee {
39 void *dev;
40 bool get_attr_pending;
41 bool get_stats_pending;
42 bool reset_stats_pending;
43 enum bfa_status get_attr_status;
44 enum bfa_status get_stats_status;
45 enum bfa_status reset_stats_status;
46 struct bfa_cee_cbfn cbfn;
47 struct bfa_ioc_notify ioc_notify;
48 struct bfa_cee_attr *attr;
49 struct bfa_cee_stats *stats;
50 struct bfa_dma attr_dma;
51 struct bfa_dma stats_dma;
52 struct bfa_ioc *ioc;
53 struct bfa_mbox_cmd get_cfg_mb;
54 struct bfa_mbox_cmd get_stats_mb;
55 struct bfa_mbox_cmd reset_stats_mb;
56};
57
58u32 bfa_nw_cee_meminfo(void);
59void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
60 u64 dma_pa);
61void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
62
63#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
new file mode 100644
index 000000000000..3da1a946ccdd
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -0,0 +1,140 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/**
20 * @file bfa_cs.h BFA common services
21 */
22
23#ifndef __BFA_CS_H__
24#define __BFA_CS_H__
25
26#include "cna.h"
27
28/**
29 * @ BFA state machine interfaces
30 */
31
32typedef void (*bfa_sm_t)(void *sm, int event);
33
34/**
35 * oc - object class eg. bfa_ioc
36 * st - state, eg. reset
37 * otype - object type, eg. struct bfa_ioc
38 * etype - event type, eg. enum ioc_event
39 */
40#define bfa_sm_state_decl(oc, st, otype, etype) \
41 static void oc ## _sm_ ## st(otype * fsm, etype event)
42
43#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
44#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
45#define bfa_sm_get_state(_sm) ((_sm)->sm)
46#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
47
48/**
49 * For converting from state machine function to state encoding.
50 */
51struct bfa_sm_table {
52 bfa_sm_t sm; /*!< state machine function */
53 int state; /*!< state machine encoding */
54 char *name; /*!< state name for display */
55};
56#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
57
58/**
59 * State machine with entry actions.
60 */
61typedef void (*bfa_fsm_t)(void *fsm, int event);
62
63/**
64 * oc - object class eg. bfa_ioc
65 * st - state, eg. reset
66 * otype - object type, eg. struct bfa_ioc
67 * etype - object type, eg. enum ioc_event
68 */
69#define bfa_fsm_state_decl(oc, st, otype, etype) \
70 static void oc ## _sm_ ## st(otype * fsm, etype event); \
71 static void oc ## _sm_ ## st ## _entry(otype * fsm)
72
73#define bfa_fsm_set_state(_fsm, _state) do { \
74 (_fsm)->fsm = (bfa_fsm_t)(_state); \
75 _state ## _entry(_fsm); \
76} while (0)
77
78#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
79#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
80#define bfa_fsm_cmp_state(_fsm, _state) \
81 ((_fsm)->fsm == (bfa_fsm_t)(_state))
82
83static inline int
84bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
85{
86 int i = 0;
87
88 while (smt[i].sm && smt[i].sm != sm)
89 i++;
90 return smt[i].state;
91}
92
93/**
94 * @ Generic wait counter.
95 */
96
97typedef void (*bfa_wc_resume_t) (void *cbarg);
98
99struct bfa_wc {
100 bfa_wc_resume_t wc_resume;
101 void *wc_cbarg;
102 int wc_count;
103};
104
105static inline void
106bfa_wc_up(struct bfa_wc *wc)
107{
108 wc->wc_count++;
109}
110
111static inline void
112bfa_wc_down(struct bfa_wc *wc)
113{
114 wc->wc_count--;
115 if (wc->wc_count == 0)
116 wc->wc_resume(wc->wc_cbarg);
117}
118
119/**
120 * Initialize a waiting counter.
121 */
122static inline void
123bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
124{
125 wc->wc_resume = wc_resume;
126 wc->wc_cbarg = wc_cbarg;
127 wc->wc_count = 0;
128 bfa_wc_up(wc);
129}
130
131/**
132 * Wait for counter to reach zero
133 */
134static inline void
135bfa_wc_wait(struct bfa_wc *wc)
136{
137 bfa_wc_down(wc);
138}
139
140#endif /* __BFA_CS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
new file mode 100644
index 000000000000..b080b3698f48
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -0,0 +1,246 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_DEFS_H__
20#define __BFA_DEFS_H__
21
22#include "cna.h"
23#include "bfa_defs_status.h"
24#include "bfa_defs_mfg_comm.h"
25
26#define BFA_STRING_32 32
27#define BFA_VERSION_LEN 64
28
29/**
30 * ---------------------- adapter definitions ------------
31 */
32
33/**
34 * BFA adapter level attributes.
35 */
36enum {
37 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
38 /*!< adapter serial num length */
41 BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */
42 BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */
43 BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */
44 BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */
45 BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */
46};
47
48struct bfa_adapter_attr {
49 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
50 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
51 u32 card_type;
52 char model[BFA_ADAPTER_MODEL_NAME_LEN];
53 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
54 u64 pwwn;
55 char node_symname[FC_SYMNAME_MAX];
56 char hw_ver[BFA_VERSION_LEN];
57 char fw_ver[BFA_VERSION_LEN];
58 char optrom_ver[BFA_VERSION_LEN];
59 char os_type[BFA_ADAPTER_OS_TYPE_LEN];
60 struct bfa_mfg_vpd vpd;
61 struct mac mac;
62
63 u8 nports;
64 u8 max_speed;
65 u8 prototype;
66 char asic_rev;
67
68 u8 pcie_gen;
69 u8 pcie_lanes_orig;
70 u8 pcie_lanes;
71 u8 cna_capable;
72
73 u8 is_mezz;
74 u8 trunk_capable;
75};
76
77/**
78 * ---------------------- IOC definitions ------------
79 */
80
81enum {
82 BFA_IOC_DRIVER_LEN = 16,
83 BFA_IOC_CHIP_REV_LEN = 8,
84};
85
86/**
87 * Driver and firmware versions.
88 */
89struct bfa_ioc_driver_attr {
90 char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
91 char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
92 char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */
93 char bios_ver[BFA_VERSION_LEN]; /*!< bios version */
94 char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */
95 char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
96};
97
98/**
99 * IOC PCI device attributes
100 */
101struct bfa_ioc_pci_attr {
102 u16 vendor_id; /*!< PCI vendor ID */
103 u16 device_id; /*!< PCI device ID */
104 u16 ssid; /*!< subsystem ID */
105 u16 ssvid; /*!< subsystem vendor ID */
106 u32 pcifn; /*!< PCI device function */
107 u32 rsvd; /* padding */
108 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
109};
110
111/**
112 * IOC states
113 */
114enum bfa_ioc_state {
115 BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
116 BFA_IOC_RESET = 2, /*!< IOC is in reset state */
117 BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */
118 BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */
119 BFA_IOC_GETATTR = 5, /*!< IOC is being configured */
120 BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */
121 BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */
122 BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */
123 BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */
124 BFA_IOC_DISABLED = 10, /*!< IOC is disabled */
125 BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */
126 BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */
127};
128
129/**
130 * IOC firmware stats
131 */
132struct bfa_fw_ioc_stats {
133 u32 enable_reqs;
134 u32 disable_reqs;
135 u32 get_attr_reqs;
136 u32 dbg_sync;
137 u32 dbg_dump;
138 u32 unknown_reqs;
139};
140
141/**
142 * IOC driver stats
143 */
144struct bfa_ioc_drv_stats {
145 u32 ioc_isrs;
146 u32 ioc_enables;
147 u32 ioc_disables;
148 u32 ioc_hbfails;
149 u32 ioc_boots;
150 u32 stats_tmos;
151 u32 hb_count;
152 u32 disable_reqs;
153 u32 enable_reqs;
154 u32 disable_replies;
155 u32 enable_replies;
156 u32 rsvd;
157};
158
159/**
160 * IOC statistics
161 */
162struct bfa_ioc_stats {
163 struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
164 struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
165};
166
167enum bfa_ioc_type {
168 BFA_IOC_TYPE_FC = 1,
169 BFA_IOC_TYPE_FCoE = 2,
170 BFA_IOC_TYPE_LL = 3,
171};
172
173/**
174 * IOC attributes returned in queries
175 */
176struct bfa_ioc_attr {
177 enum bfa_ioc_type ioc_type;
178 enum bfa_ioc_state state; /*!< IOC state */
179 struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
180 struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
181 struct bfa_ioc_pci_attr pci_attr;
182 u8 port_id; /*!< port number */
183 u8 rsvd[7]; /*!< 64bit align */
184};
185
186/**
187 * ---------------------- mfg definitions ------------
188 */
189
190/**
191 * Checksum size
192 */
193#define BFA_MFG_CHKSUM_SIZE 16
194
195#define BFA_MFG_PARTNUM_SIZE 14
196#define BFA_MFG_SUPPLIER_ID_SIZE 10
197#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
198#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
199#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
200
201#pragma pack(1)
202
203/**
204 * @brief BFA adapter manufacturing block definition.
205 *
206 * All numerical fields are in big-endian format.
207 */
208struct bfa_mfg_block {
209 u8 version; /*!< manufacturing block version */
210 u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
211 u16 mfgsize; /*!< mfg block size */
212 u16 u16_chksum; /*!< old u16 checksum */
213 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
214 char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
215 u8 mfg_day; /*!< manufacturing day */
216 u8 mfg_month; /*!< manufacturing month */
217 u16 mfg_year; /*!< manufacturing year */
218 u64 mfg_wwn; /*!< wwn base for this adapter */
219 u8 num_wwn; /*!< number of wwns assigned */
220 u8 mfg_speeds; /*!< speeds allowed for this adapter */
221 u8 rsv[2];
222 char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
223 char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
224 char
225 supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
226 char
227 supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
228 mac_t mfg_mac; /*!< mac address */
229 u8 num_mac; /*!< number of mac addresses */
230 u8 rsv2;
231 u32 mfg_type; /*!< card type */
232 u8 rsv3[108];
233 u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
234};
235
236#pragma pack()
237
238/**
239 * ---------------------- pci definitions ------------
240 */
241
242#define bfa_asic_id_ct(devid) \
243 ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
244 (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
245
246#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
new file mode 100644
index 000000000000..7e0a9187bdd5
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -0,0 +1,223 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_DEFS_CNA_H__
20#define __BFA_DEFS_CNA_H__
21
22#include "bfa_defs.h"
23
24/**
25 * @brief
26 * FC physical port statistics.
27 */
28struct bfa_port_fc_stats {
29 u64 secs_reset; /*!< Seconds since stats were reset */
30 u64 tx_frames; /*!< Tx frames */
31 u64 tx_words; /*!< Tx words */
32 u64 tx_lip; /*!< Tx LIP */
33 u64 tx_nos; /*!< Tx NOS */
34 u64 tx_ols; /*!< Tx OLS */
35 u64 tx_lr; /*!< Tx LR */
36 u64 tx_lrr; /*!< Tx LRR */
37 u64 rx_frames; /*!< Rx frames */
38 u64 rx_words; /*!< Rx words */
39 u64 lip_count; /*!< Rx LIP */
40 u64 nos_count; /*!< Rx NOS */
41 u64 ols_count; /*!< Rx OLS */
42 u64 lr_count; /*!< Rx LR */
43 u64 lrr_count; /*!< Rx LRR */
44 u64 invalid_crcs; /*!< Rx CRC err frames */
45 u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */
46 u64 undersized_frm; /*!< Rx undersized frames */
47 u64 oversized_frm; /*!< Rx oversized frames */
48 u64 bad_eof_frm; /*!< Rx frames with bad EOF */
49 u64 error_frames; /*!< Errored frames */
50 u64 dropped_frames; /*!< Dropped frames */
51 u64 link_failures; /*!< Link Failure (LF) count */
52 u64 loss_of_syncs; /*!< Loss of sync count */
53 u64 loss_of_signals; /*!< Loss of signal count */
54 u64 primseq_errs; /*!< Primitive sequence protocol err. */
55 u64 bad_os_count; /*!< Invalid ordered sets */
56 u64 err_enc_out; /*!< Encoding err nonframe_8b10b */
57 u64 err_enc; /*!< Encoding err frame_8b10b */
58};
59
60/**
61 * @brief
62 * Eth Physical Port statistics.
63 */
64struct bfa_port_eth_stats {
65 u64 secs_reset; /*!< Seconds since stats were reset */
66 u64 frame_64; /*!< Frames 64 bytes */
67 u64 frame_65_127; /*!< Frames 65-127 bytes */
68 u64 frame_128_255; /*!< Frames 128-255 bytes */
69 u64 frame_256_511; /*!< Frames 256-511 bytes */
70 u64 frame_512_1023; /*!< Frames 512-1023 bytes */
71 u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */
72 u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */
73 u64 tx_bytes; /*!< Tx bytes */
74 u64 tx_packets; /*!< Tx packets */
75 u64 tx_mcast_packets; /*!< Tx multicast packets */
76 u64 tx_bcast_packets; /*!< Tx broadcast packets */
77 u64 tx_control_frame; /*!< Tx control frame */
78 u64 tx_drop; /*!< Tx drops */
79 u64 tx_jabber; /*!< Tx jabber */
80 u64 tx_fcs_error; /*!< Tx FCS errors */
81 u64 tx_fragments; /*!< Tx fragments */
82 u64 rx_bytes; /*!< Rx bytes */
83 u64 rx_packets; /*!< Rx packets */
84 u64 rx_mcast_packets; /*!< Rx multicast packets */
85 u64 rx_bcast_packets; /*!< Rx broadcast packets */
86 u64 rx_control_frames; /*!< Rx control frames */
87 u64 rx_unknown_opcode; /*!< Rx unknown opcode */
88 u64 rx_drop; /*!< Rx drops */
89 u64 rx_jabber; /*!< Rx jabber */
90 u64 rx_fcs_error; /*!< Rx FCS errors */
91 u64 rx_alignment_error; /*!< Rx alignment errors */
92 u64 rx_frame_length_error; /*!< Rx frame len errors */
93 u64 rx_code_error; /*!< Rx code errors */
94 u64 rx_fragments; /*!< Rx fragments */
95 u64 rx_pause; /*!< Rx pause */
96 u64 rx_zero_pause; /*!< Rx zero pause */
97 u64 tx_pause; /*!< Tx pause */
98 u64 tx_zero_pause; /*!< Tx zero pause */
99 u64 rx_fcoe_pause; /*!< Rx FCoE pause */
100 u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */
101 u64 tx_fcoe_pause; /*!< Tx FCoE pause */
102 u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */
103};
104
105/**
106 * @brief
107 * Port statistics.
108 */
109union bfa_port_stats_u {
110 struct bfa_port_fc_stats fc;
111 struct bfa_port_eth_stats eth;
112};
113
114#pragma pack(1)
115
116#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
117#define BFA_CEE_DCBX_MAX_PRIORITY (8)
118#define BFA_CEE_DCBX_MAX_PGID (8)
119
120#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
121#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
122#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
123#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
124#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
125#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
126#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
127#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
128#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
129#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
130#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
131
132/* LLDP string type */
133struct bfa_cee_lldp_str {
134 u8 sub_type;
135 u8 len;
136 u8 rsvd[2];
137 u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
138};
139
140/* LLDP parameters */
141struct bfa_cee_lldp_cfg {
142 struct bfa_cee_lldp_str chassis_id;
143 struct bfa_cee_lldp_str port_id;
144 struct bfa_cee_lldp_str port_desc;
145 struct bfa_cee_lldp_str sys_name;
146 struct bfa_cee_lldp_str sys_desc;
147 struct bfa_cee_lldp_str mgmt_addr;
148 u16 time_to_live;
149 u16 enabled_system_cap;
150};
151
152enum bfa_cee_dcbx_version {
153 DCBX_PROTOCOL_PRECEE = 1,
154 DCBX_PROTOCOL_CEE = 2,
155};
156
157enum bfa_cee_lls {
158 /* LLS is down because the TLV is not sent by the peer */
159 CEE_LLS_DOWN_NO_TLV = 0,
160 /* LLS is down as advertised by the peer */
161 CEE_LLS_DOWN = 1,
162 CEE_LLS_UP = 2,
163};
164
165/* CEE/DCBX parameters */
166struct bfa_cee_dcbx_cfg {
167 u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
168 u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
169 u8 pfc_primap; /* bitmap of priorities with PFC enabled */
170 u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
171 u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
172 u8 dcbx_version; /* operating version:CEE or preCEE */
173 u8 lls_fcoe; /* FCoE Logical Link Status */
174 u8 lls_lan; /* LAN Logical Link Status */
175 u8 rsvd[2];
176};
177
178/* CEE status */
179/* Made tri-state for the benefit of the port list command */
180enum bfa_cee_status {
181 CEE_UP = 0,
182 CEE_PHY_UP = 1,
183 CEE_LOOPBACK = 2,
184 CEE_PHY_DOWN = 3,
185};
186
187/* CEE Query */
188struct bfa_cee_attr {
189 u8 cee_status;
190 u8 error_reason;
191 struct bfa_cee_lldp_cfg lldp_remote;
192 struct bfa_cee_dcbx_cfg dcbx_remote;
193 mac_t src_mac;
194 u8 link_speed;
195 u8 nw_priority;
196 u8 filler[2];
197};
198
199/* LLDP/DCBX/CEE Statistics */
200struct bfa_cee_stats {
201 u32 lldp_tx_frames; /*!< LLDP Tx Frames */
202 u32 lldp_rx_frames; /*!< LLDP Rx Frames */
203 u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */
204 u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */
205 u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */
206 u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */
207 u32 lldp_info_aged_out; /*!< LLDP remote info aged out */
208 u32 dcbx_phylink_ups; /*!< DCBX phy link ups */
209 u32 dcbx_phylink_downs; /*!< DCBX phy link downs */
210 u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */
211 u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */
212 u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */
213 u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */
214 u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */
215 u32 cee_status_down; /*!< CEE status down */
216 u32 cee_status_up; /*!< CEE status up */
217 u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
218 u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
219};
220
221#pragma pack()
222
223#endif /* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
new file mode 100644
index 000000000000..885ef3afdd4e
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -0,0 +1,222 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFA_DEFS_MFG_COMM_H__
19#define __BFA_DEFS_MFG_COMM_H__
20
21#include "cna.h"
22
23/**
24 * Manufacturing block version
25 */
26#define BFA_MFG_VERSION 2
27#define BFA_MFG_VERSION_UNINIT 0xFF
28
29/**
30 * Manufacturing block encrypted version
31 */
32#define BFA_MFG_ENC_VER 2
33
34/**
35 * Manufacturing block version 1 length
36 */
37#define BFA_MFG_VER1_LEN 128
38
39/**
40 * Manufacturing block header length
41 */
42#define BFA_MFG_HDR_LEN 4
43
44#define BFA_MFG_SERIALNUM_SIZE 11
45#define STRSZ(_n) (((_n) + 4) & ~3)
46
47/**
48 * Manufacturing card type
49 */
50enum {
51 BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
52 BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
53 BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
54 BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
55 BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
56 BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
57 BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
58 BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
59 BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
60 BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
61 BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
62 BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
63 BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
64};
65
66#pragma pack(1)
67
68/**
69 * Check if 1-port card
70 */
71#define bfa_mfg_is_1port(type) (( \
72 (type) == BFA_MFG_TYPE_FC8P1 || \
73 (type) == BFA_MFG_TYPE_FC4P1 || \
74 (type) == BFA_MFG_TYPE_CNA10P1))
75
76/**
77 * Check if Mezz card
78 */
79#define bfa_mfg_is_mezz(type) (( \
80 (type) == BFA_MFG_TYPE_JAYHAWK || \
81 (type) == BFA_MFG_TYPE_WANCHESE || \
82 (type) == BFA_MFG_TYPE_ASTRA || \
83 (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
84 (type) == BFA_MFG_TYPE_LIGHTNING))
85
86/**
87 * Check if card type valid
88 */
89#define bfa_mfg_is_card_type_valid(type) (( \
90 (type) == BFA_MFG_TYPE_FC8P2 || \
91 (type) == BFA_MFG_TYPE_FC8P1 || \
92 (type) == BFA_MFG_TYPE_FC4P2 || \
93 (type) == BFA_MFG_TYPE_FC4P1 || \
94 (type) == BFA_MFG_TYPE_CNA10P2 || \
95 (type) == BFA_MFG_TYPE_CNA10P1 || \
96 bfa_mfg_is_mezz(type)))
97
98#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
99do { \
100 switch ((card_type)) { \
101 case BFA_MFG_TYPE_FC8P2: \
102 case BFA_MFG_TYPE_JAYHAWK: \
103 case BFA_MFG_TYPE_ASTRA: \
104 (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
105 BFI_ADAPTER_SETP(SPEED, 8); \
106 break; \
107 case BFA_MFG_TYPE_FC8P1: \
108 (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
109 BFI_ADAPTER_SETP(SPEED, 8); \
110 break; \
111 case BFA_MFG_TYPE_FC4P2: \
112 (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
113 BFI_ADAPTER_SETP(SPEED, 4); \
114 break; \
115 case BFA_MFG_TYPE_FC4P1: \
116 (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
117 BFI_ADAPTER_SETP(SPEED, 4); \
118 break; \
119 case BFA_MFG_TYPE_CNA10P2: \
120 case BFA_MFG_TYPE_WANCHESE: \
121 case BFA_MFG_TYPE_LIGHTNING_P0: \
122 case BFA_MFG_TYPE_LIGHTNING: \
123 (prop) = BFI_ADAPTER_SETP(NPORTS, 2); \
124 (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
125 break; \
126 case BFA_MFG_TYPE_CNA10P1: \
127 (prop) = BFI_ADAPTER_SETP(NPORTS, 1); \
128 (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
129 break; \
130 default: \
131 (prop) = BFI_ADAPTER_UNSUPP; \
132 } \
133} while (0)
134
135enum {
136 CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
137 CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
138 CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
139 CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
140 CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
141 CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
142 CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
143};
144
145#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
146do { \
147 if ((gpio) & CB_GPIO_PROTO) { \
148 (prop) |= BFI_ADAPTER_PROTO; \
149 (gpio) &= ~CB_GPIO_PROTO; \
150 } \
151 switch ((gpio)) { \
152 case CB_GPIO_TTV: \
153 (prop) |= BFI_ADAPTER_TTV; \
154 case CB_GPIO_DFLY: \
155 case CB_GPIO_FC8P2: \
156 (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
157 (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
158 (card_type) = BFA_MFG_TYPE_FC8P2; \
159 break; \
160 case CB_GPIO_FC8P1: \
161 (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
162 (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
163 (card_type) = BFA_MFG_TYPE_FC8P1; \
164 break; \
165 case CB_GPIO_FC4P2: \
166 (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
167 (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
168 (card_type) = BFA_MFG_TYPE_FC4P2; \
169 break; \
170 case CB_GPIO_FC4P1: \
171 (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
172 (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
173 (card_type) = BFA_MFG_TYPE_FC4P1; \
174 break; \
175 default: \
176 (prop) |= BFI_ADAPTER_UNSUPP; \
177 (card_type) = BFA_MFG_TYPE_INVALID; \
178 } \
179} while (0)
180
181/**
182 * VPD data length
183 */
184#define BFA_MFG_VPD_LEN 512
185#define BFA_MFG_VPD_LEN_INVALID 0
186
187#define BFA_MFG_VPD_PCI_HDR_OFF 137
188#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
189#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
190
191/**
192 * VPD vendor tag
193 */
194enum {
195 BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
196 BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
197 BFA_MFG_VPD_HP = 2, /*!< vendor HP */
198 BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
199 BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
200 BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
201 BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
202 BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
203};
204
205/**
206 * @brief BFA adapter flash vpd data definition.
207 *
208 * All numerical fields are in big-endian format.
209 */
210struct bfa_mfg_vpd {
211 u8 version; /*!< vpd data version */
212 u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
213 u8 chksum; /*!< u8 checksum */
214 u8 vendor; /*!< vendor */
215 u8 len; /*!< vpd data length excluding header */
216 u8 rsv;
217 u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
218};
219
220#pragma pack()
221
222#endif /* __BFA_DEFS_MFG_COMM_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
new file mode 100644
index 000000000000..7c5fe6c2e80e
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
@@ -0,0 +1,216 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFA_DEFS_STATUS_H__
19#define __BFA_DEFS_STATUS_H__
20
21/**
22 * API status return values
23 *
24 * NOTE: The error messages are auto-generated from the comments. Only
25 * single-line comments are supported.
26 */
27enum bfa_status {
28 BFA_STATUS_OK = 0,
29 BFA_STATUS_FAILED = 1,
30 BFA_STATUS_EINVAL = 2,
31 BFA_STATUS_ENOMEM = 3,
32 BFA_STATUS_ENOSYS = 4,
33 BFA_STATUS_ETIMER = 5,
34 BFA_STATUS_EPROTOCOL = 6,
35 BFA_STATUS_ENOFCPORTS = 7,
36 BFA_STATUS_NOFLASH = 8,
37 BFA_STATUS_BADFLASH = 9,
38 BFA_STATUS_SFP_UNSUPP = 10,
39 BFA_STATUS_UNKNOWN_VFID = 11,
40 BFA_STATUS_DATACORRUPTED = 12,
41 BFA_STATUS_DEVBUSY = 13,
42 BFA_STATUS_ABORTED = 14,
43 BFA_STATUS_NODEV = 15,
44 BFA_STATUS_HDMA_FAILED = 16,
45 BFA_STATUS_FLASH_BAD_LEN = 17,
46 BFA_STATUS_UNKNOWN_LWWN = 18,
47 BFA_STATUS_UNKNOWN_RWWN = 19,
48 BFA_STATUS_FCPT_LS_RJT = 20,
49 BFA_STATUS_VPORT_EXISTS = 21,
50 BFA_STATUS_VPORT_MAX = 22,
51 BFA_STATUS_UNSUPP_SPEED = 23,
52 BFA_STATUS_INVLD_DFSZ = 24,
53 BFA_STATUS_CNFG_FAILED = 25,
54 BFA_STATUS_CMD_NOTSUPP = 26,
55 BFA_STATUS_NO_ADAPTER = 27,
56 BFA_STATUS_LINKDOWN = 28,
57 BFA_STATUS_FABRIC_RJT = 29,
58 BFA_STATUS_UNKNOWN_VWWN = 30,
59 BFA_STATUS_NSLOGIN_FAILED = 31,
60 BFA_STATUS_NO_RPORTS = 32,
61 BFA_STATUS_NSQUERY_FAILED = 33,
62 BFA_STATUS_PORT_OFFLINE = 34,
63 BFA_STATUS_RPORT_OFFLINE = 35,
64 BFA_STATUS_TGTOPEN_FAILED = 36,
65 BFA_STATUS_BAD_LUNS = 37,
66 BFA_STATUS_IO_FAILURE = 38,
67 BFA_STATUS_NO_FABRIC = 39,
68 BFA_STATUS_EBADF = 40,
69 BFA_STATUS_EINTR = 41,
70 BFA_STATUS_EIO = 42,
71 BFA_STATUS_ENOTTY = 43,
72 BFA_STATUS_ENXIO = 44,
73 BFA_STATUS_EFOPEN = 45,
74 BFA_STATUS_VPORT_WWN_BP = 46,
75 BFA_STATUS_PORT_NOT_DISABLED = 47,
76 BFA_STATUS_BADFRMHDR = 48,
77 BFA_STATUS_BADFRMSZ = 49,
78 BFA_STATUS_MISSINGFRM = 50,
79 BFA_STATUS_LINKTIMEOUT = 51,
80 BFA_STATUS_NO_FCPIM_NEXUS = 52,
81 BFA_STATUS_CHECKSUM_FAIL = 53,
82 BFA_STATUS_GZME_FAILED = 54,
83 BFA_STATUS_SCSISTART_REQD = 55,
84 BFA_STATUS_IOC_FAILURE = 56,
85 BFA_STATUS_INVALID_WWN = 57,
86 BFA_STATUS_MISMATCH = 58,
87 BFA_STATUS_IOC_ENABLED = 59,
88 BFA_STATUS_ADAPTER_ENABLED = 60,
89 BFA_STATUS_IOC_NON_OP = 61,
90 BFA_STATUS_ADDR_MAP_FAILURE = 62,
91 BFA_STATUS_SAME_NAME = 63,
92 BFA_STATUS_PENDING = 64,
93 BFA_STATUS_8G_SPD = 65,
94 BFA_STATUS_4G_SPD = 66,
95 BFA_STATUS_AD_IS_ENABLE = 67,
96 BFA_STATUS_EINVAL_TOV = 68,
97 BFA_STATUS_EINVAL_QDEPTH = 69,
98 BFA_STATUS_VERSION_FAIL = 70,
99 BFA_STATUS_DIAG_BUSY = 71,
100 BFA_STATUS_BEACON_ON = 72,
101 BFA_STATUS_BEACON_OFF = 73,
102 BFA_STATUS_LBEACON_ON = 74,
103 BFA_STATUS_LBEACON_OFF = 75,
104 BFA_STATUS_PORT_NOT_INITED = 76,
105 BFA_STATUS_RPSC_ENABLED = 77,
106 BFA_STATUS_ENOFSAVE = 78,
107 BFA_STATUS_BAD_FILE = 79,
108 BFA_STATUS_RLIM_EN = 80,
109 BFA_STATUS_RLIM_DIS = 81,
110 BFA_STATUS_IOC_DISABLED = 82,
111 BFA_STATUS_ADAPTER_DISABLED = 83,
112 BFA_STATUS_BIOS_DISABLED = 84,
113 BFA_STATUS_AUTH_ENABLED = 85,
114 BFA_STATUS_AUTH_DISABLED = 86,
115 BFA_STATUS_ERROR_TRL_ENABLED = 87,
116 BFA_STATUS_ERROR_QOS_ENABLED = 88,
117 BFA_STATUS_NO_SFP_DEV = 89,
118 BFA_STATUS_MEMTEST_FAILED = 90,
119 BFA_STATUS_INVALID_DEVID = 91,
120 BFA_STATUS_QOS_ENABLED = 92,
121 BFA_STATUS_QOS_DISABLED = 93,
122 BFA_STATUS_INCORRECT_DRV_CONFIG = 94,
123 BFA_STATUS_REG_FAIL = 95,
124 BFA_STATUS_IM_INV_CODE = 96,
125 BFA_STATUS_IM_INV_VLAN = 97,
126 BFA_STATUS_IM_INV_ADAPT_NAME = 98,
127 BFA_STATUS_IM_LOW_RESOURCES = 99,
128 BFA_STATUS_IM_VLANID_IS_PVID = 100,
129 BFA_STATUS_IM_VLANID_EXISTS = 101,
130 BFA_STATUS_IM_FW_UPDATE_FAIL = 102,
131 BFA_STATUS_PORTLOG_ENABLED = 103,
132 BFA_STATUS_PORTLOG_DISABLED = 104,
133 BFA_STATUS_FILE_NOT_FOUND = 105,
134 BFA_STATUS_QOS_FC_ONLY = 106,
135 BFA_STATUS_RLIM_FC_ONLY = 107,
136 BFA_STATUS_CT_SPD = 108,
137 BFA_STATUS_LEDTEST_OP = 109,
138 BFA_STATUS_CEE_NOT_DN = 110,
139 BFA_STATUS_10G_SPD = 111,
140 BFA_STATUS_IM_INV_TEAM_NAME = 112,
141 BFA_STATUS_IM_DUP_TEAM_NAME = 113,
142 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114,
143 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115,
144 BFA_STATUS_IM_PVID_MISMATCH = 116,
145 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117,
146 BFA_STATUS_IM_MTU_MISMATCH = 118,
147 BFA_STATUS_IM_RSS_MISMATCH = 119,
148 BFA_STATUS_IM_HDS_MISMATCH = 120,
149 BFA_STATUS_IM_OFFLOAD_MISMATCH = 121,
150 BFA_STATUS_IM_PORT_PARAMS = 122,
151 BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123,
152 BFA_STATUS_IM_CANNOT_REM_PRI = 124,
153 BFA_STATUS_IM_MAX_PORTS_REACHED = 125,
154 BFA_STATUS_IM_LAST_PORT_DELETE = 126,
155 BFA_STATUS_IM_NO_DRIVER = 127,
156 BFA_STATUS_IM_MAX_VLANS_REACHED = 128,
157 BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129,
158 BFA_STATUS_NO_MINPORT_DRIVER = 130,
159 BFA_STATUS_CARD_TYPE_MISMATCH = 131,
160 BFA_STATUS_BAD_ASICBLK = 132,
161 BFA_STATUS_NO_DRIVER = 133,
162 BFA_STATUS_INVALID_MAC = 134,
163 BFA_STATUS_IM_NO_VLAN = 135,
164 BFA_STATUS_IM_ETH_LB_FAILED = 136,
165 BFA_STATUS_IM_PVID_REMOVE = 137,
166 BFA_STATUS_IM_PVID_EDIT = 138,
167 BFA_STATUS_CNA_NO_BOOT = 139,
168 BFA_STATUS_IM_PVID_NON_ZERO = 140,
169 BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141,
170 BFA_STATUS_IM_GET_INETCFG_FAILED = 142,
171 BFA_STATUS_IM_NOT_BOUND = 143,
172 BFA_STATUS_INSUFFICIENT_PERMS = 144,
173 BFA_STATUS_IM_INV_VLAN_NAME = 145,
174 BFA_STATUS_CMD_NOTSUPP_CNA = 146,
175 BFA_STATUS_IM_PASSTHRU_EDIT = 147,
176 BFA_STATUS_IM_BIND_FAILED = 148,
177 BFA_STATUS_IM_UNBIND_FAILED = 149,
178 BFA_STATUS_IM_PORT_IN_TEAM = 150,
179 BFA_STATUS_IM_VLAN_NOT_FOUND = 151,
180 BFA_STATUS_IM_TEAM_NOT_FOUND = 152,
181 BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153,
182 BFA_STATUS_PBC = 154,
183 BFA_STATUS_DEVID_MISSING = 155,
184 BFA_STATUS_BAD_FWCFG = 156,
185 BFA_STATUS_CREATE_FILE = 157,
186 BFA_STATUS_INVALID_VENDOR = 158,
187 BFA_STATUS_SFP_NOT_READY = 159,
188 BFA_STATUS_FLASH_UNINIT = 160,
189 BFA_STATUS_FLASH_EMPTY = 161,
190 BFA_STATUS_FLASH_CKFAIL = 162,
191 BFA_STATUS_TRUNK_UNSUPP = 163,
192 BFA_STATUS_TRUNK_ENABLED = 164,
193 BFA_STATUS_TRUNK_DISABLED = 165,
194 BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
195 BFA_STATUS_BOOT_CODE_UPDATED = 167,
196 BFA_STATUS_BOOT_VERSION = 168,
197 BFA_STATUS_CARDTYPE_MISSING = 169,
198 BFA_STATUS_INVALID_CARDTYPE = 170,
199 BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
200 BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
201 BFA_STATUS_ETHBOOT_ENABLED = 173,
202 BFA_STATUS_ETHBOOT_DISABLED = 174,
203 BFA_STATUS_IOPROFILE_OFF = 175,
204 BFA_STATUS_NO_PORT_INSTANCE = 176,
205 BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
206 BFA_STATUS_NO_VPORT_LOCK = 178,
207 BFA_STATUS_VPORT_NO_CNFG = 179,
208 BFA_STATUS_MAX_VAL
209};
210
211enum bfa_eproto_status {
212 BFA_EPROTO_BAD_ACCEPT = 0,
213 BFA_EPROTO_UNKNOWN_RSP = 1
214};
215
216#endif /* __BFA_DEFS_STATUS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
new file mode 100644
index 000000000000..3cdea65aee12
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -0,0 +1,2326 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_ioc.h"
20#include "cna.h"
21#include "bfi.h"
22#include "bfi_reg.h"
23#include "bfa_defs.h"
24
25/**
26 * IOC local definitions
27 */
28
29/**
30 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
31 */
32
33#define bfa_ioc_firmware_lock(__ioc) \
34 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
35#define bfa_ioc_firmware_unlock(__ioc) \
36 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
37#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
38#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
39#define bfa_ioc_notify_fail(__ioc) \
40 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
41#define bfa_ioc_sync_start(__ioc) \
42 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
43#define bfa_ioc_sync_join(__ioc) \
44 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
45#define bfa_ioc_sync_leave(__ioc) \
46 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
47#define bfa_ioc_sync_ack(__ioc) \
48 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
49#define bfa_ioc_sync_complete(__ioc) \
50 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
51
52#define bfa_ioc_mbox_cmd_pending(__ioc) \
53 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
54 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
55
56static bool bfa_nw_auto_recover = true;
57
58/*
59 * forward declarations
60 */
61static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
62static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
63static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
64static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
65static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
66static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
67static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
68static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
69static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
70static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
71static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
72static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
73static void bfa_ioc_recover(struct bfa_ioc *ioc);
74static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
75static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
76static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
77static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
78static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
79static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
80static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
81static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
82static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
83static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
84static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
85 u32 boot_param);
86static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
87static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
88 char *serial_num);
89static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
90 char *fw_ver);
91static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
92 char *chip_rev);
93static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
94 char *optrom_ver);
95static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
96 char *manufacturer);
97static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
98static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
99
100/**
101 * IOC state machine definitions/declarations
102 */
103enum ioc_event {
104 IOC_E_RESET = 1, /*!< IOC reset request */
105 IOC_E_ENABLE = 2, /*!< IOC enable request */
106 IOC_E_DISABLE = 3, /*!< IOC disable request */
107 IOC_E_DETACH = 4, /*!< driver detach cleanup */
108 IOC_E_ENABLED = 5, /*!< f/w enabled */
109 IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
110 IOC_E_DISABLED = 7, /*!< f/w disabled */
111 IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */
112 IOC_E_PFFAILED = 9, /*!< failure notice by iocpf sm */
113 IOC_E_HBFAIL = 10, /*!< heartbeat failure */
114 IOC_E_HWERROR = 11, /*!< hardware error interrupt */
115 IOC_E_TIMEOUT = 12, /*!< timeout */
116};
117
118bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
119bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
120bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
121bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
122bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
123bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
124bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
125bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
126bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
127
128static struct bfa_sm_table ioc_sm_table[] = {
129 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
130 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
131 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
132 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
133 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
134 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
135 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
136 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
137 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
138};
139
140/**
141 * IOCPF state machine definitions/declarations
142 */
143
144/*
145 * Forward declarations for iocpf state machine
146 */
147static void bfa_iocpf_enable(struct bfa_ioc *ioc);
148static void bfa_iocpf_disable(struct bfa_ioc *ioc);
149static void bfa_iocpf_fail(struct bfa_ioc *ioc);
150static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
151static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
152static void bfa_iocpf_stop(struct bfa_ioc *ioc);
153
154/**
155 * IOCPF state machine events
156 */
157enum iocpf_event {
158 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
159 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
160 IOCPF_E_STOP = 3, /*!< stop on driver detach */
161 IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
162 IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
163 IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
164 IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
165 IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
166 IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
167 IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
168 IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
169};
170
171/**
172 * IOCPF states
173 */
174enum bfa_iocpf_state {
175 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
176 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
177 BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
178 BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
179 BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
180 BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
181 BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
182 BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
183 BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
184};
185
186bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
187bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
188bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
189bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
190bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
191bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
192bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
193bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
194 enum iocpf_event);
195bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
196bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
197bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
198bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
199bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
200 enum iocpf_event);
201bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
202
203static struct bfa_sm_table iocpf_sm_table[] = {
204 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
205 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
206 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
207 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
208 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
209 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
210 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
211 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
212 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
213 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
214 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
215 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
216 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
217 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
218};
219
220/**
221 * IOC State Machine
222 */
223
224/**
225 * Beginning state. IOC uninit state.
226 */
227static void
228bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
229{
230}
231
232/**
233 * IOC is in uninit state.
234 */
235static void
236bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
237{
238 switch (event) {
239 case IOC_E_RESET:
240 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
241 break;
242
243 default:
244 bfa_sm_fault(event);
245 }
246}
247
248/**
249 * Reset entry actions -- initialize state machine
250 */
251static void
252bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
253{
254 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
255}
256
257/**
258 * IOC is in reset state.
259 */
260static void
261bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
262{
263 switch (event) {
264 case IOC_E_ENABLE:
265 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
266 break;
267
268 case IOC_E_DISABLE:
269 bfa_ioc_disable_comp(ioc);
270 break;
271
272 case IOC_E_DETACH:
273 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
274 break;
275
276 default:
277 bfa_sm_fault(event);
278 }
279}
280
281static void
282bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
283{
284 bfa_iocpf_enable(ioc);
285}
286
287/**
288 * Host IOC function is being enabled, awaiting response from firmware.
289 * Semaphore is acquired.
290 */
291static void
292bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
293{
294 switch (event) {
295 case IOC_E_ENABLED:
296 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
297 break;
298
299 case IOC_E_PFFAILED:
300 /* !!! fall through !!! */
301 case IOC_E_HWERROR:
302 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
303 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
304 if (event != IOC_E_PFFAILED)
305 bfa_iocpf_initfail(ioc);
306 break;
307
308 case IOC_E_DISABLE:
309 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
310 break;
311
312 case IOC_E_DETACH:
313 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
314 bfa_iocpf_stop(ioc);
315 break;
316
317 case IOC_E_ENABLE:
318 break;
319
320 default:
321 bfa_sm_fault(event);
322 }
323}
324
325/**
326 * Send the get-attribute request to firmware and start the IOC timer.
327 */
328static void
329bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
330{
331 mod_timer(&ioc->ioc_timer, jiffies +
332 msecs_to_jiffies(BFA_IOC_TOV));
333 bfa_ioc_send_getattr(ioc);
334}
335
336/**
337 * IOC configuration in progress. Timer is active.
338 */
339static void
340bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
341{
342 switch (event) {
343 case IOC_E_FWRSP_GETATTR:
344 del_timer(&ioc->ioc_timer);
345 bfa_ioc_check_attr_wwns(ioc);
346 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
347 break;
348
349 case IOC_E_PFFAILED:
350 case IOC_E_HWERROR:
351 del_timer(&ioc->ioc_timer);
352 /* fall through */
353 case IOC_E_TIMEOUT:
354 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
355 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
356 if (event != IOC_E_PFFAILED)
357 bfa_iocpf_getattrfail(ioc);
358 break;
359
360 case IOC_E_DISABLE:
361 del_timer(&ioc->ioc_timer);
362 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
363 break;
364
365 case IOC_E_ENABLE:
366 break;
367
368 default:
369 bfa_sm_fault(event);
370 }
371}
372
373static void
374bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
375{
376 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
377 bfa_ioc_hb_monitor(ioc);
378}
379
380static void
381bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
382{
383 switch (event) {
384 case IOC_E_ENABLE:
385 break;
386
387 case IOC_E_DISABLE:
388 bfa_ioc_hb_stop(ioc);
389 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
390 break;
391
392 case IOC_E_PFFAILED:
393 case IOC_E_HWERROR:
394 bfa_ioc_hb_stop(ioc);
395 /* !!! fall through !!! */
396 case IOC_E_HBFAIL:
397 bfa_ioc_fail_notify(ioc);
398 if (ioc->iocpf.auto_recover)
399 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
400 else
401 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
402
403 if (event != IOC_E_PFFAILED)
404 bfa_iocpf_fail(ioc);
405 break;
406
407 default:
408 bfa_sm_fault(event);
409 }
410}
411
412static void
413bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
414{
415 bfa_iocpf_disable(ioc);
416}
417
418/**
419 * IOC is being disabled
420 */
421static void
422bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
423{
424 switch (event) {
425 case IOC_E_DISABLED:
426 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
427 break;
428
429 case IOC_E_HWERROR:
430 /*
431 * No state change. Will move to disabled state
432 * after iocpf sm completes failure processing and
433 * moves to disabled state.
434 */
435 bfa_iocpf_fail(ioc);
436 break;
437
438 default:
439 bfa_sm_fault(event);
440 }
441}
442
443/**
444 * IOC disable completion entry.
445 */
446static void
447bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
448{
449 bfa_ioc_disable_comp(ioc);
450}
451
452static void
453bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
454{
455 switch (event) {
456 case IOC_E_ENABLE:
457 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
458 break;
459
460 case IOC_E_DISABLE:
461 ioc->cbfn->disable_cbfn(ioc->bfa);
462 break;
463
464 case IOC_E_DETACH:
465 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
466 bfa_iocpf_stop(ioc);
467 break;
468
469 default:
470 bfa_sm_fault(event);
471 }
472}
473
474static void
475bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
476{
477}
478
479/**
480 * Hardware initialization retry.
481 */
482static void
483bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
484{
485 switch (event) {
486 case IOC_E_ENABLED:
487 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
488 break;
489
490 case IOC_E_PFFAILED:
491 case IOC_E_HWERROR:
492 /**
493 * Initialization retry failed.
494 */
495 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
496 if (event != IOC_E_PFFAILED)
497 bfa_iocpf_initfail(ioc);
498 break;
499
500 case IOC_E_INITFAILED:
501 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
502 break;
503
504 case IOC_E_ENABLE:
505 break;
506
507 case IOC_E_DISABLE:
508 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
509 break;
510
511 case IOC_E_DETACH:
512 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
513 bfa_iocpf_stop(ioc);
514 break;
515
516 default:
517 bfa_sm_fault(event);
518 }
519}
520
521static void
522bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
523{
524}
525
526/**
527 * IOC failure.
528 */
529static void
530bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
531{
532 switch (event) {
533 case IOC_E_ENABLE:
534 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
535 break;
536
537 case IOC_E_DISABLE:
538 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
539 break;
540
541 case IOC_E_DETACH:
542 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
543 bfa_iocpf_stop(ioc);
544 break;
545
546 case IOC_E_HWERROR:
547 /* HB failure notification, ignore. */
548 break;
549
550 default:
551 bfa_sm_fault(event);
552 }
553}
554
555/**
556 * IOCPF State Machine
557 */
558
559/**
560 * Reset entry actions -- initialize state machine
561 */
562static void
563bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
564{
565 iocpf->retry_count = 0;
566 iocpf->auto_recover = bfa_nw_auto_recover;
567}
568
569/**
570 * Beginning state. IOC is in reset state.
571 */
572static void
573bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
574{
575 switch (event) {
576 case IOCPF_E_ENABLE:
577 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
578 break;
579
580 case IOCPF_E_STOP:
581 break;
582
583 default:
584 bfa_sm_fault(event);
585 }
586}
587
588/**
589 * Semaphore should be acquired for version check.
590 */
591static void
592bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
593{
594 bfa_ioc_hw_sem_init(iocpf->ioc);
595 bfa_ioc_hw_sem_get(iocpf->ioc);
596}
597
598/**
599 * Awaiting h/w semaphore to continue with version check.
600 */
601static void
602bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
603{
604 struct bfa_ioc *ioc = iocpf->ioc;
605
606 switch (event) {
607 case IOCPF_E_SEMLOCKED:
608 if (bfa_ioc_firmware_lock(ioc)) {
609 if (bfa_ioc_sync_start(ioc)) {
610 iocpf->retry_count = 0;
611 bfa_ioc_sync_join(ioc);
612 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
613 } else {
614 bfa_ioc_firmware_unlock(ioc);
615 bfa_nw_ioc_hw_sem_release(ioc);
616 mod_timer(&ioc->sem_timer, jiffies +
617 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
618 }
619 } else {
620 bfa_nw_ioc_hw_sem_release(ioc);
621 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
622 }
623 break;
624
625 case IOCPF_E_DISABLE:
626 bfa_ioc_hw_sem_get_cancel(ioc);
627 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
628 bfa_ioc_pf_disabled(ioc);
629 break;
630
631 case IOCPF_E_STOP:
632 bfa_ioc_hw_sem_get_cancel(ioc);
633 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
634 break;
635
636 default:
637 bfa_sm_fault(event);
638 }
639}
640
641/**
642 * Notify driver of firmware mismatch (first time only) and start the retry timer
643 */
644static void
645bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
646{
647 /* Call only the first time sm enters fwmismatch state. */
648 if (iocpf->retry_count == 0)
649 bfa_ioc_pf_fwmismatch(iocpf->ioc);
650
651 iocpf->retry_count++;
652 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
653 msecs_to_jiffies(BFA_IOC_TOV));
654}
655
656/**
657 * Awaiting firmware version match.
658 */
659static void
660bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
661{
662 struct bfa_ioc *ioc = iocpf->ioc;
663
664 switch (event) {
665 case IOCPF_E_TIMEOUT:
666 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
667 break;
668
669 case IOCPF_E_DISABLE:
670 del_timer(&ioc->iocpf_timer);
671 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
672 bfa_ioc_pf_disabled(ioc);
673 break;
674
675 case IOCPF_E_STOP:
676 del_timer(&ioc->iocpf_timer);
677 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
678 break;
679
680 default:
681 bfa_sm_fault(event);
682 }
683}
684
685/**
686 * Request for semaphore.
687 */
688static void
689bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
690{
691 bfa_ioc_hw_sem_get(iocpf->ioc);
692}
693
694/**
695 * Awaiting semaphore for h/w initialization.
696 */
697static void
698bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
699{
700 struct bfa_ioc *ioc = iocpf->ioc;
701
702 switch (event) {
703 case IOCPF_E_SEMLOCKED:
704 if (bfa_ioc_sync_complete(ioc)) {
705 bfa_ioc_sync_join(ioc);
706 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
707 } else {
708 bfa_nw_ioc_hw_sem_release(ioc);
709 mod_timer(&ioc->sem_timer, jiffies +
710 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
711 }
712 break;
713
714 case IOCPF_E_DISABLE:
715 bfa_ioc_hw_sem_get_cancel(ioc);
716 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
717 break;
718
719 default:
720 bfa_sm_fault(event);
721 }
722}
723
724static void
725bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
726{
727 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
728 msecs_to_jiffies(BFA_IOC_TOV));
729 bfa_ioc_reset(iocpf->ioc, 0);
730}
731
732/**
733 * Hardware is being initialized. Interrupts are enabled.
734 * Holding hardware semaphore lock.
735 */
736static void
737bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
738{
739 struct bfa_ioc *ioc = iocpf->ioc;
740
741 switch (event) {
742 case IOCPF_E_FWREADY:
743 del_timer(&ioc->iocpf_timer);
744 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
745 break;
746
747 case IOCPF_E_INITFAIL:
748 del_timer(&ioc->iocpf_timer);
749 /*
750 * !!! fall through !!!
751 */
752
753 case IOCPF_E_TIMEOUT:
754 bfa_nw_ioc_hw_sem_release(ioc);
755 if (event == IOCPF_E_TIMEOUT)
756 bfa_ioc_pf_failed(ioc);
757 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
758 break;
759
760 case IOCPF_E_DISABLE:
761 del_timer(&ioc->iocpf_timer);
762 bfa_ioc_sync_leave(ioc);
763 bfa_nw_ioc_hw_sem_release(ioc);
764 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
765 break;
766
767 default:
768 bfa_sm_fault(event);
769 }
770}
771
772static void
773bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
774{
775 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
776 msecs_to_jiffies(BFA_IOC_TOV));
777 bfa_ioc_send_enable(iocpf->ioc);
778}
779
780/**
781 * Host IOC function is being enabled, awaiting response from firmware.
782 * Semaphore is acquired.
783 */
784static void
785bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
786{
787 struct bfa_ioc *ioc = iocpf->ioc;
788
789 switch (event) {
790 case IOCPF_E_FWRSP_ENABLE:
791 del_timer(&ioc->iocpf_timer);
792 bfa_nw_ioc_hw_sem_release(ioc);
793 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
794 break;
795
796 case IOCPF_E_INITFAIL:
797 del_timer(&ioc->iocpf_timer);
798 /*
799 * !!! fall through !!!
800 */
801 case IOCPF_E_TIMEOUT:
802 bfa_nw_ioc_hw_sem_release(ioc);
803 if (event == IOCPF_E_TIMEOUT)
804 bfa_ioc_pf_failed(ioc);
805 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
806 break;
807
808 case IOCPF_E_DISABLE:
809 del_timer(&ioc->iocpf_timer);
810 bfa_nw_ioc_hw_sem_release(ioc);
811 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
812 break;
813
814 case IOCPF_E_FWREADY:
815 bfa_ioc_send_enable(ioc);
816 break;
817
818 default:
819 bfa_sm_fault(event);
820 }
821}
822
823static bool
824bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
825{
826 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
827}
828
829static void
830bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
831{
832 bfa_ioc_pf_enabled(iocpf->ioc);
833}
834
835static void
836bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
837{
838 struct bfa_ioc *ioc = iocpf->ioc;
839
840 switch (event) {
841 case IOCPF_E_DISABLE:
842 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
843 break;
844
845 case IOCPF_E_GETATTRFAIL:
846 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
847 break;
848
849 case IOCPF_E_FAIL:
850 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
851 break;
852
853 case IOCPF_E_FWREADY:
854 bfa_ioc_pf_failed(ioc);
855 if (bfa_nw_ioc_is_operational(ioc))
856 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
857 else
858 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
859 break;
860
861 default:
862 bfa_sm_fault(event);
863 }
864}
865
866static void
867bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
868{
869 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
870 msecs_to_jiffies(BFA_IOC_TOV));
871 bfa_ioc_send_disable(iocpf->ioc);
872}
873
874/**
875 * IOC is being disabled
876 */
877static void
878bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
879{
880 struct bfa_ioc *ioc = iocpf->ioc;
881
882 switch (event) {
883 case IOCPF_E_FWRSP_DISABLE:
884 case IOCPF_E_FWREADY:
885 del_timer(&ioc->iocpf_timer);
886 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
887 break;
888
889 case IOCPF_E_FAIL:
890 del_timer(&ioc->iocpf_timer);
891 /*
892 * !!! fall through !!!
893 */
894
895 case IOCPF_E_TIMEOUT:
896 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
897 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
898 break;
899
900 case IOCPF_E_FWRSP_ENABLE:
901 break;
902
903 default:
904 bfa_sm_fault(event);
905 }
906}
907
908static void
909bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
910{
911 bfa_ioc_hw_sem_get(iocpf->ioc);
912}
913
914/**
915 * Awaiting the h/w semaphore to complete the disable synchronization.
916 */
917static void
918bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
919{
920 struct bfa_ioc *ioc = iocpf->ioc;
921
922 switch (event) {
923 case IOCPF_E_SEMLOCKED:
924 bfa_ioc_sync_leave(ioc);
925 bfa_nw_ioc_hw_sem_release(ioc);
926 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
927 break;
928
929 case IOCPF_E_FAIL:
930 break;
931
932 default:
933 bfa_sm_fault(event);
934 }
935}
936
937/**
938 * IOC disable completion entry.
939 */
940static void
941bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
942{
943 bfa_ioc_mbox_flush(iocpf->ioc);
944 bfa_ioc_pf_disabled(iocpf->ioc);
945}
946
947static void
948bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
949{
950 struct bfa_ioc *ioc = iocpf->ioc;
951
952 switch (event) {
953 case IOCPF_E_ENABLE:
954 iocpf->retry_count = 0;
955 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
956 break;
957
958 case IOCPF_E_STOP:
959 bfa_ioc_firmware_unlock(ioc);
960 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
961 break;
962
963 default:
964 bfa_sm_fault(event);
965 }
966}
967
968static void
969bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
970{
971 bfa_ioc_hw_sem_get(iocpf->ioc);
972}
973
974/**
975 * Hardware initialization failed.
976 */
977static void
978bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
979{
980 struct bfa_ioc *ioc = iocpf->ioc;
981
982 switch (event) {
983 case IOCPF_E_SEMLOCKED:
984 bfa_ioc_notify_fail(ioc);
985 bfa_ioc_sync_ack(ioc);
986 iocpf->retry_count++;
987 if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
988 bfa_ioc_sync_leave(ioc);
989 bfa_nw_ioc_hw_sem_release(ioc);
990 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
991 } else {
992 if (bfa_ioc_sync_complete(ioc))
993 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
994 else {
995 bfa_nw_ioc_hw_sem_release(ioc);
996 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
997 }
998 }
999 break;
1000
1001 case IOCPF_E_DISABLE:
1002 bfa_ioc_hw_sem_get_cancel(ioc);
1003 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1004 break;
1005
1006 case IOCPF_E_STOP:
1007 bfa_ioc_hw_sem_get_cancel(ioc);
1008 bfa_ioc_firmware_unlock(ioc);
1009 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1010 break;
1011
1012 case IOCPF_E_FAIL:
1013 break;
1014
1015 default:
1016 bfa_sm_fault(event);
1017 }
1018}
1019
1020static void
1021bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
1022{
1023 bfa_ioc_pf_initfailed(iocpf->ioc);
1024}
1025
1026/**
1027 * Hardware initialization failed.
1028 */
1029static void
1030bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1031{
1032 struct bfa_ioc *ioc = iocpf->ioc;
1033
1034 switch (event) {
1035 case IOCPF_E_DISABLE:
1036 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1037 break;
1038
1039 case IOCPF_E_STOP:
1040 bfa_ioc_firmware_unlock(ioc);
1041 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1042 break;
1043
1044 default:
1045 bfa_sm_fault(event);
1046 }
1047}
1048
1049static void
1050bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1051{
1052 /**
1053 * Mark IOC as failed in hardware and stop firmware.
1054 */
1055 bfa_ioc_lpu_stop(iocpf->ioc);
1056
1057 /**
1058 * Flush any queued up mailbox requests.
1059 */
1060 bfa_ioc_mbox_flush(iocpf->ioc);
1061 bfa_ioc_hw_sem_get(iocpf->ioc);
1062}
1063
1064/**
1065 * IOC is in failed state.
1066 */
1067static void
1068bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1069{
1070 struct bfa_ioc *ioc = iocpf->ioc;
1071
1072 switch (event) {
1073 case IOCPF_E_SEMLOCKED:
1074 iocpf->retry_count = 0;
1075 bfa_ioc_sync_ack(ioc);
1076 bfa_ioc_notify_fail(ioc);
1077 if (!iocpf->auto_recover) {
1078 bfa_ioc_sync_leave(ioc);
1079 bfa_nw_ioc_hw_sem_release(ioc);
1080 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1081 } else {
1082 if (bfa_ioc_sync_complete(ioc))
1083 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1084 else {
1085 bfa_nw_ioc_hw_sem_release(ioc);
1086 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1087 }
1088 }
1089 break;
1090
1091 case IOCPF_E_DISABLE:
1092 bfa_ioc_hw_sem_get_cancel(ioc);
1093 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1094 break;
1095
1096 case IOCPF_E_FAIL:
1097 break;
1098
1099 default:
1100 bfa_sm_fault(event);
1101 }
1102}
1103
1104static void
1105bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1106{
1107}
1108
1109/**
1110 * @brief
1111 * IOC is in failed state.
1112 */
1113static void
1114bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1115{
1116 switch (event) {
1117 case IOCPF_E_DISABLE:
1118 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1119 break;
1120
1121 default:
1122 bfa_sm_fault(event);
1123 }
1124}
1125
1126/**
1127 * BFA IOC private functions
1128 */
1129
1130/**
1131 * Notify common modules registered for notification.
1132 */
1133static void
1134bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1135{
1136 struct bfa_ioc_notify *notify;
1137 struct list_head *qe;
1138
1139 list_for_each(qe, &ioc->notify_q) {
1140 notify = (struct bfa_ioc_notify *)qe;
1141 notify->cbfn(notify->cbarg, event);
1142 }
1143}
1144
1145static void
1146bfa_ioc_disable_comp(struct bfa_ioc *ioc)
1147{
1148 ioc->cbfn->disable_cbfn(ioc->bfa);
1149 bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1150}
1151
1152bool
1153bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1154{
1155 u32 r32;
1156 int cnt = 0;
1157#define BFA_SEM_SPINCNT 3000
1158
1159 r32 = readl(sem_reg);
1160
1161 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
1162 cnt++;
1163 udelay(2);
1164 r32 = readl(sem_reg);
1165 }
1166
1167 if (r32 == 0)
1168 return true;
1169
1170 BUG_ON(!(cnt < BFA_SEM_SPINCNT));
1171 return false;
1172}
1173
1174void
1175bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1176{
1177 writel(1, sem_reg);
1178}
1179
1180static void
1181bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1182{
1183 struct bfi_ioc_image_hdr fwhdr;
1184 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1185
1186 if (fwstate == BFI_IOC_UNINIT)
1187 return;
1188
1189 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1190
1191 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
1192 return;
1193
1194 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
1195
1196 /*
1197 * Try to lock and then unlock the semaphore.
1198 */
1199 readl(ioc->ioc_regs.ioc_sem_reg);
1200 writel(1, ioc->ioc_regs.ioc_sem_reg);
1201}
1202
1203static void
1204bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
1205{
1206 u32 r32;
1207
1208 /**
1209	 * The first read of the semaphore register returns 0 (semaphore acquired);
1210	 * subsequent reads return 1. The semaphore is released by writing 1 to it.
1211 */
1212 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1213 if (r32 == 0) {
1214 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1215 return;
1216 }
1217
1218 mod_timer(&ioc->sem_timer, jiffies +
1219 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
1220}
1221
1222void
1223bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
1224{
1225 writel(1, ioc->ioc_regs.ioc_sem_reg);
1226}
1227
1228static void
1229bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1230{
1231 del_timer(&ioc->sem_timer);
1232}
1233
1234/**
1235 * @brief
1236 * Initialize LPU local memory (aka secondary memory / SRAM)
1237 */
1238static void
1239bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1240{
1241 u32 pss_ctl;
1242 int i;
1243#define PSS_LMEM_INIT_TIME 10000
1244
1245 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1246 pss_ctl &= ~__PSS_LMEM_RESET;
1247 pss_ctl |= __PSS_LMEM_INIT_EN;
1248
1249 /*
1250 * i2c workaround 12.5khz clock
1251 */
1252 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1253 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1254
1255 /**
1256 * wait for memory initialization to be complete
1257 */
1258 i = 0;
1259 do {
1260 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1261 i++;
1262 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1263
1264 /**
1265 * If memory initialization is not successful, IOC timeout will catch
1266 * such failures.
1267 */
1268 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1269
1270 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1271 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1272}
1273
1274static void
1275bfa_ioc_lpu_start(struct bfa_ioc *ioc)
1276{
1277 u32 pss_ctl;
1278
1279 /**
1280 * Take processor out of reset.
1281 */
1282 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1283 pss_ctl &= ~__PSS_LPU0_RESET;
1284
1285 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1286}
1287
1288static void
1289bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1290{
1291 u32 pss_ctl;
1292
1293 /**
1294 * Put processors in reset.
1295 */
1296 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1297 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1298
1299 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1300}
1301
1302/**
1303 * Read the running firmware's image header (version) from smem.
1304 */
1305void
1306bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1307{
1308 u32 pgnum;
1309 u32 loff = 0;
1310 int i;
1311 u32 *fwsig = (u32 *) fwhdr;
1312
1313 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1314 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1315
1316 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1317 i++) {
1318 fwsig[i] =
1319 swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1320 loff += sizeof(u32);
1321 }
1322}
1323
1324/**
1325 * Returns true if the given firmware header matches the driver's firmware image.
1326 */
1327bool
1328bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1329{
1330 struct bfi_ioc_image_hdr *drv_fwhdr;
1331 int i;
1332
1333 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1334 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
1335
1336 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1337 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1338 return false;
1339 }
1340
1341 return true;
1342}
1343
1344/**
1345 * Return true if current running version is valid. Firmware signature and
1346 * execution context (driver/bios) must match.
1347 */
1348static bool
1349bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1350{
1351 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1352
1353 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1354 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1355 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
1356
1357 if (fwhdr.signature != drv_fwhdr->signature)
1358 return false;
1359
1360 if (swab32(fwhdr.param) != boot_env)
1361 return false;
1362
1363 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1364}
1365
1366/**
1367 * Conditionally flush any pending message from firmware at start.
1368 */
1369static void
1370bfa_ioc_msgflush(struct bfa_ioc *ioc)
1371{
1372 u32 r32;
1373
1374 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1375 if (r32)
1376 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1377}
1378
1379/**
1380 * @img ioc_init_logic.jpg
1381 */
1382static void
1383bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1384{
1385 enum bfi_ioc_state ioc_fwstate;
1386 bool fwvalid;
1387 u32 boot_env;
1388
1389 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1390
1391 boot_env = BFI_BOOT_LOADER_OS;
1392
1393 if (force)
1394 ioc_fwstate = BFI_IOC_UNINIT;
1395
1396 /**
1397 * check if firmware is valid
1398 */
1399 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1400 false : bfa_ioc_fwver_valid(ioc, boot_env);
1401
1402 if (!fwvalid) {
1403 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
1404 return;
1405 }
1406
1407 /**
1408 * If hardware initialization is in progress (initialized by other IOC),
1409 * just wait for an initialization completion interrupt.
1410 */
1411 if (ioc_fwstate == BFI_IOC_INITING) {
1412 ioc->cbfn->reset_cbfn(ioc->bfa);
1413 return;
1414 }
1415
1416 /**
1417	 * If the IOC function is disabled and the firmware version is the same,
1418 * just re-enable IOC.
1419 */
1420 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1421 /**
1422 * When using MSI-X any pending firmware ready event should
1423 * be flushed. Otherwise MSI-X interrupts are not delivered.
1424 */
1425 bfa_ioc_msgflush(ioc);
1426 ioc->cbfn->reset_cbfn(ioc->bfa);
1427 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1428 return;
1429 }
1430
1431 /**
1432 * Initialize the h/w for any other states.
1433 */
1434 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
1435}
1436
1437void
1438bfa_nw_ioc_timeout(void *ioc_arg)
1439{
1440 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
1441
1442 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1443}
1444
1445static void
1446bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1447{
1448 u32 *msgp = (u32 *) ioc_msg;
1449 u32 i;
1450
1451 BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
1452
1453 /*
1454 * first write msg to mailbox registers
1455 */
1456 for (i = 0; i < len / sizeof(u32); i++)
1457 writel(cpu_to_le32(msgp[i]),
1458 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1459
1460 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1461 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1462
1463 /*
1464 * write 1 to mailbox CMD to trigger LPU event
1465 */
1466 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1467 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1468}
1469
1470static void
1471bfa_ioc_send_enable(struct bfa_ioc *ioc)
1472{
1473 struct bfi_ioc_ctrl_req enable_req;
1474 struct timeval tv;
1475
1476 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1477 bfa_ioc_portid(ioc));
1478 enable_req.ioc_class = ioc->ioc_mc;
1479 do_gettimeofday(&tv);
1480 enable_req.tv_sec = ntohl(tv.tv_sec);
1481 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1482}
1483
1484static void
1485bfa_ioc_send_disable(struct bfa_ioc *ioc)
1486{
1487 struct bfi_ioc_ctrl_req disable_req;
1488
1489 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1490 bfa_ioc_portid(ioc));
1491 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1492}
1493
1494static void
1495bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1496{
1497 struct bfi_ioc_getattr_req attr_req;
1498
1499 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1500 bfa_ioc_portid(ioc));
1501 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1502 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1503}
1504
1505void
1506bfa_nw_ioc_hb_check(void *cbarg)
1507{
1508 struct bfa_ioc *ioc = cbarg;
1509 u32 hb_count;
1510
1511 hb_count = readl(ioc->ioc_regs.heartbeat);
1512 if (ioc->hb_count == hb_count) {
1513 bfa_ioc_recover(ioc);
1514 return;
1515 } else {
1516 ioc->hb_count = hb_count;
1517 }
1518
1519 bfa_ioc_mbox_poll(ioc);
1520 mod_timer(&ioc->hb_timer, jiffies +
1521 msecs_to_jiffies(BFA_IOC_HB_TOV));
1522}
1523
1524static void
1525bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1526{
1527 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1528 mod_timer(&ioc->hb_timer, jiffies +
1529 msecs_to_jiffies(BFA_IOC_HB_TOV));
1530}
1531
1532static void
1533bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1534{
1535 del_timer(&ioc->hb_timer);
1536}
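/*
 * The heartbeat code above only re-arms hb_timer via mod_timer(); the
 * embedding driver is expected to have set the timer up so that it fires
 * into bfa_nw_ioc_hb_check(). A minimal hookup sketch, assuming this kernel
 * generation's setup_timer() API and a hypothetical wrapper name:
 */
static void my_hb_timeout(unsigned long data)	/* hypothetical wrapper */
{
	bfa_nw_ioc_hb_check((void *)data);
}

/* at attach/probe time: */
setup_timer(&ioc->hb_timer, my_hb_timeout, (unsigned long)ioc);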
1537
1538/**
1539 * @brief
1540 * Initiate a full firmware download.
1541 */
1542static void
1543bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1544 u32 boot_env)
1545{
1546 u32 *fwimg;
1547 u32 pgnum;
1548 u32 loff = 0;
1549 u32 chunkno = 0;
1550 u32 i;
1551
1552 /**
1553 * Initialize LMEM first before code download
1554 */
1555 bfa_ioc_lmem_init(ioc);
1556
1557 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1558
1559 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1560
1561 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1562
1563 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1564 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1565 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1566 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1567 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1568 }
1569
1570 /**
1571 * write smem
1572 */
1573 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1574 ((ioc->ioc_regs.smem_page_start) + (loff)));
1575
1576 loff += sizeof(u32);
1577
1578 /**
1579 * handle page offset wrap around
1580 */
1581 loff = PSS_SMEM_PGOFF(loff);
1582 if (loff == 0) {
1583 pgnum++;
1584 writel(pgnum,
1585 ioc->ioc_regs.host_page_num_fn);
1586 }
1587 }
1588
1589 writel(bfa_ioc_smem_pgnum(ioc, 0),
1590 ioc->ioc_regs.host_page_num_fn);
1591
1592 /*
1593 * Set boot type and boot param at the end.
1594 */
1595 writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1596 + (BFI_BOOT_TYPE_OFF)));
1597 writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1598 + (BFI_BOOT_LOADER_OFF)));
1599}
1600
1601static void
1602bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1603{
1604 bfa_ioc_hwinit(ioc, force);
1605}
1606
1607/**
1608 * @brief
1609 * Update BFA configuration from firmware configuration.
1610 */
1611static void
1612bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1613{
1614 struct bfi_ioc_attr *attr = ioc->attr;
1615
1616 attr->adapter_prop = ntohl(attr->adapter_prop);
1617 attr->card_type = ntohl(attr->card_type);
1618 attr->maxfrsize = ntohs(attr->maxfrsize);
1619
1620 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1621}
1622
1623/**
1624 * Attach time initialization of mbox logic.
1625 */
1626static void
1627bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1628{
1629 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1630 int mc;
1631
1632 INIT_LIST_HEAD(&mod->cmd_q);
1633 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1634 mod->mbhdlr[mc].cbfn = NULL;
1635 mod->mbhdlr[mc].cbarg = ioc->bfa;
1636 }
1637}
1638
1639/**
1640 * Mbox poll timer -- restarts any pending mailbox requests.
1641 */
1642static void
1643bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1644{
1645 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1646 struct bfa_mbox_cmd *cmd;
1647 u32 stat;
1648
1649 /**
1650 * If no command pending, do nothing
1651 */
1652 if (list_empty(&mod->cmd_q))
1653 return;
1654
1655 /**
1656 * If previous command is not yet fetched by firmware, do nothing
1657 */
1658 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1659 if (stat)
1660 return;
1661
1662 /**
1663 * Enqueue command to firmware.
1664 */
1665 bfa_q_deq(&mod->cmd_q, &cmd);
1666 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1667}
1668
1669/**
1670 * Cleanup any pending requests.
1671 */
1672static void
1673bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
1674{
1675 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1676 struct bfa_mbox_cmd *cmd;
1677
1678 while (!list_empty(&mod->cmd_q))
1679 bfa_q_deq(&mod->cmd_q, &cmd);
1680}
1681
1682static void
1683bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1684{
1685 /**
1686 * Notify driver and common modules registered for notification.
1687 */
1688 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1689 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1690}
1691
1692static void
1693bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1694{
1695 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1696}
1697
1698static void
1699bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1700{
1701 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1702}
1703
1704static void
1705bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
1706{
1707 bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
1708}
1709
1710static void
1711bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1712{
1713 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
1714}
1715
1716static void
1717bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1718{
1719 /**
1720 * Provide enable completion callback and AEN notification.
1721 */
1722 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1723}
1724
1725/**
1726 * IOC public
1727 */
1728static enum bfa_status
1729bfa_ioc_pll_init(struct bfa_ioc *ioc)
1730{
1731 /*
1732 * Hold semaphore so that nobody can access the chip during init.
1733 */
1734 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1735
1736 bfa_ioc_pll_init_asic(ioc);
1737
1738 ioc->pllinit = true;
1739 /*
1740 * release semaphore.
1741 */
1742 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1743
1744 return BFA_STATUS_OK;
1745}
1746
1747/**
1748 * Boot the firmware: download the image and start the LPU. Also used by the
1749 * diag module to boot firmware with memory test as the entry vector.
1750 */
1751static void
1752bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
1753{
1754 void __iomem *rb;
1755
1756 bfa_ioc_stats(ioc, ioc_boots);
1757
1758 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1759 return;
1760
1761 /**
1762 * Initialize IOC state of all functions on a chip reset.
1763 */
1764 rb = ioc->pcidev.pci_bar_kva;
1765 if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
1766 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1767 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1768 } else {
1769 writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1770 writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1771 }
1772
1773 bfa_ioc_msgflush(ioc);
1774 bfa_ioc_download_fw(ioc, boot_type, boot_env);
1775
1776 /**
1777 * Enable interrupts just before starting LPU
1778 */
1779 ioc->cbfn->reset_cbfn(ioc->bfa);
1780 bfa_ioc_lpu_start(ioc);
1781}
1782
1783/**
1784 * Enable/disable IOC failure auto recovery.
1785 */
1786void
1787bfa_nw_ioc_auto_recover(bool auto_recover)
1788{
1789 bfa_nw_auto_recover = auto_recover;
1790}
1791
1792static void
1793bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1794{
1795 u32 *msgp = mbmsg;
1796 u32 r32;
1797 int i;
1798
1799 /**
1800 * read the MBOX msg
1801 */
1802 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1803 i++) {
1804 r32 = readl(ioc->ioc_regs.lpu_mbox +
1805 i * sizeof(u32));
1806 msgp[i] = htonl(r32);
1807 }
1808
1809 /**
1810 * turn off mailbox interrupt by clearing mailbox status
1811 */
1812 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1813 readl(ioc->ioc_regs.lpu_mbox_cmd);
1814}
1815
1816static void
1817bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1818{
1819 union bfi_ioc_i2h_msg_u *msg;
1820 struct bfa_iocpf *iocpf = &ioc->iocpf;
1821
1822 msg = (union bfi_ioc_i2h_msg_u *) m;
1823
1824 bfa_ioc_stats(ioc, ioc_isrs);
1825
1826 switch (msg->mh.msg_id) {
1827 case BFI_IOC_I2H_HBEAT:
1828 break;
1829
1830 case BFI_IOC_I2H_READY_EVENT:
1831 bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1832 break;
1833
1834 case BFI_IOC_I2H_ENABLE_REPLY:
1835 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1836 break;
1837
1838 case BFI_IOC_I2H_DISABLE_REPLY:
1839 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1840 break;
1841
1842 case BFI_IOC_I2H_GETATTR_REPLY:
1843 bfa_ioc_getattr_reply(ioc);
1844 break;
1845
1846 default:
1847 BUG_ON(1);
1848 }
1849}
1850
1851/**
1852 * IOC attach time initialization and setup.
1853 *
1854 * @param[in] ioc memory for IOC
1855 * @param[in] bfa driver instance structure
1856 */
1857void
1858bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1859{
1860 ioc->bfa = bfa;
1861 ioc->cbfn = cbfn;
1862 ioc->fcmode = false;
1863 ioc->pllinit = false;
1864 ioc->dbg_fwsave_once = true;
1865 ioc->iocpf.ioc = ioc;
1866
1867 bfa_ioc_mbox_attach(ioc);
1868 INIT_LIST_HEAD(&ioc->notify_q);
1869
1870 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1871 bfa_fsm_send_event(ioc, IOC_E_RESET);
1872}
1873
1874/**
1875 * Driver detach time IOC cleanup.
1876 */
1877void
1878bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1879{
1880 bfa_fsm_send_event(ioc, IOC_E_DETACH);
1881}
1882
1883/**
1884 * Setup IOC PCI properties.
1885 *
1886 * @param[in] pcidev PCI device information for this IOC
1887 */
1888void
1889bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1890 enum bfi_mclass mc)
1891{
1892 ioc->ioc_mc = mc;
1893 ioc->pcidev = *pcidev;
1894 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
1895 ioc->cna = ioc->ctdev && !ioc->fcmode;
1896
1897 bfa_nw_ioc_set_ct_hwif(ioc);
1898
1899 bfa_ioc_map_port(ioc);
1900 bfa_ioc_reg_init(ioc);
1901}
1902
1903/**
1904 * Initialize IOC dma memory
1905 *
1906 * @param[in] dm_kva kernel virtual address of IOC dma memory
1907 * @param[in] dm_pa physical address of IOC dma memory
1908 */
1909void
1910bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
1911{
1912 /**
1913 * dma memory for firmware attribute
1914 */
1915 ioc->attr_dma.kva = dm_kva;
1916 ioc->attr_dma.pa = dm_pa;
1917 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
1918}
1919
1920/**
1921 * Return size of dma memory required.
1922 */
1923u32
1924bfa_nw_ioc_meminfo(void)
1925{
1926 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
1927}
1928
1929void
1930bfa_nw_ioc_enable(struct bfa_ioc *ioc)
1931{
1932 bfa_ioc_stats(ioc, ioc_enables);
1933 ioc->dbg_fwsave_once = true;
1934
1935 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1936}
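/*
 * Taken together, the attach/pci_init/mem_claim/enable entry points give the
 * expected bring-up order for a caller such as the bnad driver. A simplified
 * sketch; the my_* callbacks and local variables are hypothetical and only
 * illustrate the call order:
 */
static struct bfa_ioc_cbfn my_cbfn = {
	.enable_cbfn	= my_enable_cb,
	.disable_cbfn	= my_disable_cb,
	.hbfail_cbfn	= my_hbfail_cb,
	.reset_cbfn	= my_reset_cb,
};

bfa_nw_ioc_attach(ioc, bfa, &my_cbfn);		/* FSM: uninit -> reset */
bfa_nw_ioc_pci_init(ioc, &pcidev, BFI_MC_LL);	/* map regs, select hwif */
dma_sz = bfa_nw_ioc_meminfo();			/* size of DMA area to allocate */
bfa_nw_ioc_mem_claim(ioc, dm_kva, dm_pa);	/* firmware attribute buffer */
bfa_nw_ioc_enable(ioc);				/* sends IOC_E_ENABLE */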
1937
1938void
1939bfa_nw_ioc_disable(struct bfa_ioc *ioc)
1940{
1941 bfa_ioc_stats(ioc, ioc_disables);
1942 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1943}
1944
1945static u32
1946bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
1947{
1948 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1949}
1950
1951/**
1952 * Register mailbox message handler function, to be called by common modules
1953 */
1954void
1955bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
1956 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1957{
1958 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1959
1960 mod->mbhdlr[mc].cbfn = cbfn;
1961 mod->mbhdlr[mc].cbarg = cbarg;
1962}
1963
1964/**
1965 * Queue a mailbox command request to firmware. If the mailbox is busy, the
1966 * command is queued and sent later by the poll timer. Caller must serialize.
1967 *
1968 * @param[in]	ioc	IOC instance
1969 * @param[in]	cmd	Mailbox command
1970 */
1971void
1972bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
1973{
1974 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1975 u32 stat;
1976
1977 /**
1978 * If a previous command is pending, queue new command
1979 */
1980 if (!list_empty(&mod->cmd_q)) {
1981 list_add_tail(&cmd->qe, &mod->cmd_q);
1982 return;
1983 }
1984
1985 /**
1986 * If mailbox is busy, queue command for poll timer
1987 */
1988 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1989 if (stat) {
1990 list_add_tail(&cmd->qe, &mod->cmd_q);
1991 return;
1992 }
1993
1994 /**
1995 * mailbox is free -- queue command to firmware
1996 */
1997 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1998
1999 return;
2000}
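/*
 * Usage sketch for the mailbox interface: a common module registers a
 * per-class handler once and then queues commands. The my_* names are
 * hypothetical.
 */
static void my_msg_handler(void *cbarg, struct bfi_mbmsg *m)
{
	/* process a firmware-to-host message of this message class */
}

bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_LL, my_msg_handler, my_cbarg);

/*
 * Build the request in cmd->msg (e.g. with bfi_h2i_set()) and queue it; it
 * is written to the mailbox immediately, or later from bfa_ioc_mbox_poll()
 * if the mailbox is busy.
 */
bfa_nw_ioc_mbox_queue(ioc, cmd);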
2001
2002/**
2003 * Handle mailbox interrupts
2004 */
2005void
2006bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2007{
2008 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2009 struct bfi_mbmsg m;
2010 int mc;
2011
2012 bfa_ioc_msgget(ioc, &m);
2013
2014 /**
2015 * Treat IOC message class as special.
2016 */
2017 mc = m.mh.msg_class;
2018 if (mc == BFI_MC_IOC) {
2019 bfa_ioc_isr(ioc, &m);
2020 return;
2021 }
2022
2023 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2024 return;
2025
2026 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2027}
2028
2029void
2030bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2031{
2032 bfa_ioc_stats(ioc, ioc_hbfails);
2033 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2034 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2035}
2036
2037/**
2038 * return true if IOC is disabled
2039 */
2040bool
2041bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2042{
2043 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2044 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2045}
2046
2047/**
2048 * Add to IOC heartbeat failure notification queue. To be used by common
2049 * modules such as cee, port, diag.
2050 */
2051void
2052bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2053 struct bfa_ioc_notify *notify)
2054{
2055 list_add_tail(&notify->qe, &ioc->notify_q);
2056}
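/*
 * Registration sketch, using the bfa_ioc_notify_init() helper declared in
 * bfa_ioc.h (callback and cbarg names are hypothetical):
 */
static void my_ioc_notify(void *cbarg, enum bfa_ioc_event event)
{
	/* handle BFA_IOC_E_ENABLED / BFA_IOC_E_DISABLED / BFA_IOC_E_FAILED */
}

static struct bfa_ioc_notify my_notify;

bfa_ioc_notify_init(&my_notify, my_ioc_notify, my_cbarg);
bfa_nw_ioc_notify_register(ioc, &my_notify);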
2057
2058#define BFA_MFG_NAME "Brocade"
2059static void
2060bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2061 struct bfa_adapter_attr *ad_attr)
2062{
2063 struct bfi_ioc_attr *ioc_attr;
2064
2065 ioc_attr = ioc->attr;
2066
2067 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2068 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2069 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2070 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2071 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2072 sizeof(struct bfa_mfg_vpd));
2073
2074 ad_attr->nports = bfa_ioc_get_nports(ioc);
2075 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2076
2077 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2078 /* For now, model descr uses same model string */
2079 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2080
2081 ad_attr->card_type = ioc_attr->card_type;
2082 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2083
2084 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2085 ad_attr->prototype = 1;
2086 else
2087 ad_attr->prototype = 0;
2088
2089 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2090 ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
2091
2092 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2093 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2094 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2095 ad_attr->asic_rev = ioc_attr->asic_rev;
2096
2097 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2098
2099 ad_attr->cna_capable = ioc->cna;
2100 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
2101}
2102
2103static enum bfa_ioc_type
2104bfa_ioc_get_type(struct bfa_ioc *ioc)
2105{
2106 if (!ioc->ctdev || ioc->fcmode)
2107 return BFA_IOC_TYPE_FC;
2108 else if (ioc->ioc_mc == BFI_MC_IOCFC)
2109 return BFA_IOC_TYPE_FCoE;
2110 else if (ioc->ioc_mc == BFI_MC_LL)
2111 return BFA_IOC_TYPE_LL;
2112 else {
2113 BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
2114 return BFA_IOC_TYPE_LL;
2115 }
2116}
2117
2118static void
2119bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2120{
2121 memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2122 memcpy(serial_num,
2123 (void *)ioc->attr->brcd_serialnum,
2124 BFA_ADAPTER_SERIAL_NUM_LEN);
2125}
2126
2127static void
2128bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2129{
2130 memset(fw_ver, 0, BFA_VERSION_LEN);
2131 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2132}
2133
2134static void
2135bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2136{
2137 BUG_ON(!(chip_rev));
2138
2139 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2140
2141 chip_rev[0] = 'R';
2142 chip_rev[1] = 'e';
2143 chip_rev[2] = 'v';
2144 chip_rev[3] = '-';
2145 chip_rev[4] = ioc->attr->asic_rev;
2146 chip_rev[5] = '\0';
2147}
2148
2149static void
2150bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2151{
2152 memset(optrom_ver, 0, BFA_VERSION_LEN);
2153 memcpy(optrom_ver, ioc->attr->optrom_version,
2154 BFA_VERSION_LEN);
2155}
2156
2157static void
2158bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2159{
2160 memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2161 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2162}
2163
2164static void
2165bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2166{
2167 struct bfi_ioc_attr *ioc_attr;
2168
2169 BUG_ON(!(model));
2170 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2171
2172 ioc_attr = ioc->attr;
2173
2174 /**
2175 * model name
2176 */
2177 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2178 BFA_MFG_NAME, ioc_attr->card_type);
2179}
2180
2181static enum bfa_ioc_state
2182bfa_ioc_get_state(struct bfa_ioc *ioc)
2183{
2184 enum bfa_iocpf_state iocpf_st;
2185 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2186
2187 if (ioc_st == BFA_IOC_ENABLING ||
2188 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2189
2190 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2191
2192 switch (iocpf_st) {
2193 case BFA_IOCPF_SEMWAIT:
2194 ioc_st = BFA_IOC_SEMWAIT;
2195 break;
2196
2197 case BFA_IOCPF_HWINIT:
2198 ioc_st = BFA_IOC_HWINIT;
2199 break;
2200
2201 case BFA_IOCPF_FWMISMATCH:
2202 ioc_st = BFA_IOC_FWMISMATCH;
2203 break;
2204
2205 case BFA_IOCPF_FAIL:
2206 ioc_st = BFA_IOC_FAIL;
2207 break;
2208
2209 case BFA_IOCPF_INITFAIL:
2210 ioc_st = BFA_IOC_INITFAIL;
2211 break;
2212
2213 default:
2214 break;
2215 }
2216 }
2217 return ioc_st;
2218}
2219
2220void
2221bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2222{
2223 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2224
2225 ioc_attr->state = bfa_ioc_get_state(ioc);
2226 ioc_attr->port_id = ioc->port_id;
2227
2228 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2229
2230 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2231
2232 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2233 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2234 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2235}
2236
2237/**
2238 * WWN public
2239 */
2240static u64
2241bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2242{
2243 return ioc->attr->pwwn;
2244}
2245
2246mac_t
2247bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2248{
2249 return ioc->attr->mac;
2250}
2251
2252/**
2253 * Firmware failure detected. Start recovery actions.
2254 */
2255static void
2256bfa_ioc_recover(struct bfa_ioc *ioc)
2257{
2258 pr_crit("Heart Beat of IOC has failed\n");
2259 bfa_ioc_stats(ioc, ioc_hbfails);
2260 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2261 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2262}
2263
2264static void
2265bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
2266{
2267 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2268 return;
2269}
2270
2271/**
2272 * @dg hal_iocpf_pvt BFA IOC PF private functions
2273 * @{
2274 */
2275
2276static void
2277bfa_iocpf_enable(struct bfa_ioc *ioc)
2278{
2279 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2280}
2281
2282static void
2283bfa_iocpf_disable(struct bfa_ioc *ioc)
2284{
2285 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2286}
2287
2288static void
2289bfa_iocpf_fail(struct bfa_ioc *ioc)
2290{
2291 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2292}
2293
2294static void
2295bfa_iocpf_initfail(struct bfa_ioc *ioc)
2296{
2297 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2298}
2299
2300static void
2301bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2302{
2303 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2304}
2305
2306static void
2307bfa_iocpf_stop(struct bfa_ioc *ioc)
2308{
2309 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2310}
2311
2312void
2313bfa_nw_iocpf_timeout(void *ioc_arg)
2314{
2315 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2316
2317 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2318}
2319
2320void
2321bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2322{
2323 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2324
2325 bfa_ioc_hw_sem_get(ioc);
2326}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
new file mode 100644
index 000000000000..bda866ba6e90
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -0,0 +1,315 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_IOC_H__
20#define __BFA_IOC_H__
21
22#include "bfa_cs.h"
23#include "bfi.h"
24#include "cna.h"
25
26#define BFA_IOC_TOV 3000 /* msecs */
27#define BFA_IOC_HWSEM_TOV 500 /* msecs */
28#define BFA_IOC_HB_TOV 500 /* msecs */
29#define BFA_IOC_HWINIT_MAX 5
30
31/**
32 * PCI device information required by IOC
33 */
34struct bfa_pcidev {
35 int pci_slot;
36 u8 pci_func;
37 u16 device_id;
38 void __iomem *pci_bar_kva;
39};
40
41/**
42 * Structure used to remember the DMA-able memory block's KVA and Physical
43 * Address
44 */
45struct bfa_dma {
46 void *kva; /* ! Kernel virtual address */
47 u64 pa; /* ! Physical address */
48};
49
50#define BFA_DMA_ALIGN_SZ 256
51
52/**
53 * smem size for Crossbow and Catapult
54 */
55#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
56#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
57
58/**
59 * @brief BFA dma address assignment macro. (big endian format)
60 */
61#define bfa_dma_be_addr_set(dma_addr, pa) \
62 __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
63static inline void
64__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
65{
66 dma_addr->a32.addr_lo = (u32) htonl(pa);
67 dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
68}
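/*
 * Usage sketch, mirroring bfa_ioc_send_getattr() in bfa_ioc.c: place a DMA
 * address, in big-endian form, into a BFI request before mailing it to
 * firmware.
 */
struct bfi_ioc_getattr_req attr_req;

bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);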
69
70struct bfa_ioc_regs {
71 void __iomem *hfn_mbox_cmd;
72 void __iomem *hfn_mbox;
73 void __iomem *lpu_mbox_cmd;
74 void __iomem *lpu_mbox;
75 void __iomem *pss_ctl_reg;
76 void __iomem *pss_err_status_reg;
77 void __iomem *app_pll_fast_ctl_reg;
78 void __iomem *app_pll_slow_ctl_reg;
79 void __iomem *ioc_sem_reg;
80 void __iomem *ioc_usage_sem_reg;
81 void __iomem *ioc_init_sem_reg;
82 void __iomem *ioc_usage_reg;
83 void __iomem *host_page_num_fn;
84 void __iomem *heartbeat;
85 void __iomem *ioc_fwstate;
86 void __iomem *alt_ioc_fwstate;
87 void __iomem *ll_halt;
88 void __iomem *alt_ll_halt;
89 void __iomem *err_set;
90 void __iomem *ioc_fail_sync;
91 void __iomem *shirq_isr_next;
92 void __iomem *shirq_msk_next;
93 void __iomem *smem_page_start;
94 u32 smem_pg0;
95};
96
97/**
98 * IOC Mailbox structures
99 */
100typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
101struct bfa_mbox_cmd {
102 struct list_head qe;
103 bfa_mbox_cmd_cbfn_t cbfn;
104 void *cbarg;
105 u32 msg[BFI_IOC_MSGSZ];
106};
107
108/**
109 * IOC mailbox module
110 */
111typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
112struct bfa_ioc_mbox_mod {
113 struct list_head cmd_q; /*!< pending mbox queue */
114 int nmclass; /*!< number of handlers */
115 struct {
116 bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */
117 void *cbarg;
118 } mbhdlr[BFI_MC_MAX];
119};
120
121/**
122 * IOC callback function interfaces
123 */
124typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
125typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
126typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
127typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
128struct bfa_ioc_cbfn {
129 bfa_ioc_enable_cbfn_t enable_cbfn;
130 bfa_ioc_disable_cbfn_t disable_cbfn;
131 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
132 bfa_ioc_reset_cbfn_t reset_cbfn;
133};
134
135/**
136 * IOC event notification mechanism.
137 */
138enum bfa_ioc_event {
139 BFA_IOC_E_ENABLED = 1,
140 BFA_IOC_E_DISABLED = 2,
141 BFA_IOC_E_FAILED = 3,
142};
143
144typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event);
145
146struct bfa_ioc_notify {
147 struct list_head qe;
148 bfa_ioc_notify_cbfn_t cbfn;
149 void *cbarg;
150};
151
152/**
153 * Heartbeat failure notification queue element.
154 */
155struct bfa_ioc_hbfail_notify {
156 struct list_head qe;
157 bfa_ioc_hbfail_cbfn_t cbfn;
158 void *cbarg;
159};
160
161/**
162 * Initialize a heartbeat failure notification structure
163 */
164#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
165 (__notify)->cbfn = (__cbfn); \
166 (__notify)->cbarg = (__cbarg); \
167} while (0)
168
169struct bfa_iocpf {
170 bfa_fsm_t fsm;
171 struct bfa_ioc *ioc;
172 u32 retry_count;
173 bool auto_recover;
174};
175
176struct bfa_ioc {
177 bfa_fsm_t fsm;
178 struct bfa *bfa;
179 struct bfa_pcidev pcidev;
180 struct timer_list ioc_timer;
181 struct timer_list iocpf_timer;
182 struct timer_list sem_timer;
183 struct timer_list hb_timer;
184 u32 hb_count;
185 struct list_head notify_q;
186 void *dbg_fwsave;
187 int dbg_fwsave_len;
188 bool dbg_fwsave_once;
189 enum bfi_mclass ioc_mc;
190 struct bfa_ioc_regs ioc_regs;
191 struct bfa_ioc_drv_stats stats;
192 bool fcmode;
193 bool ctdev;
194 bool cna;
195 bool pllinit;
196 bool stats_busy; /*!< outstanding stats */
197 u8 port_id;
198
199 struct bfa_dma attr_dma;
200 struct bfi_ioc_attr *attr;
201 struct bfa_ioc_cbfn *cbfn;
202 struct bfa_ioc_mbox_mod mbox_mod;
203 struct bfa_ioc_hwif *ioc_hwif;
204 struct bfa_iocpf iocpf;
205};
206
207struct bfa_ioc_hwif {
208 enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
209 bool (*ioc_firmware_lock) (struct bfa_ioc *ioc);
210 void (*ioc_firmware_unlock) (struct bfa_ioc *ioc);
211 void (*ioc_reg_init) (struct bfa_ioc *ioc);
212 void (*ioc_map_port) (struct bfa_ioc *ioc);
213 void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
214 bool msix);
215 void (*ioc_notify_fail) (struct bfa_ioc *ioc);
216 void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
217 bool (*ioc_sync_start) (struct bfa_ioc *ioc);
218 void (*ioc_sync_join) (struct bfa_ioc *ioc);
219 void (*ioc_sync_leave) (struct bfa_ioc *ioc);
220 void (*ioc_sync_ack) (struct bfa_ioc *ioc);
221 bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
222};
223
224#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
225#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
226#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
227#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
228#define bfa_ioc_fetch_stats(__ioc, __stats) \
229 (((__stats)->drv_stats) = (__ioc)->stats)
230#define bfa_ioc_clr_stats(__ioc) \
231 memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
232#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
233#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
234#define bfa_ioc_speed_sup(__ioc) \
235 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
236#define bfa_ioc_get_nports(__ioc) \
237 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
238
239#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
240#define bfa_ioc_stats_hb_count(_ioc, _hb_count) \
241 ((_ioc)->stats.hb_count = (_hb_count))
242#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
243#define BFA_IOC_FWIMG_TYPE(__ioc) \
244 (((__ioc)->ctdev) ? \
245 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
246 BFI_IMAGE_CB_FC)
247#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
248 (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
249#define BFA_IOC_FLASH_CHUNK_NO(off) ((off) / BFI_FLASH_CHUNK_SZ_WORDS)
250#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) ((off) % BFI_FLASH_CHUNK_SZ_WORDS)
251#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) ((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
252
253/**
254 * IOC mailbox interface
255 */
256void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
257void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
258void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
259 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
260
261/**
262 * IOC interfaces
263 */
264
265#define bfa_ioc_pll_init_asic(__ioc) \
266 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
267 (__ioc)->fcmode))
268
269#define bfa_ioc_isr_mode_set(__ioc, __msix) \
270 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
271#define bfa_ioc_ownership_reset(__ioc) \
272 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
273
274void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
275
276void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
277 struct bfa_ioc_cbfn *cbfn);
278void bfa_nw_ioc_auto_recover(bool auto_recover);
279void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
280void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
281 enum bfi_mclass mc);
282u32 bfa_nw_ioc_meminfo(void);
283void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
284void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
285void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
286
287void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
288bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
289void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
290void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
291 struct bfa_ioc_notify *notify);
292bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
293void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
294void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
295void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
296 struct bfi_ioc_image_hdr *fwhdr);
297bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
298 struct bfi_ioc_image_hdr *fwhdr);
299mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
300
301/*
302 * Timeout APIs
303 */
304void bfa_nw_ioc_timeout(void *ioc);
305void bfa_nw_ioc_hb_check(void *ioc);
306void bfa_nw_iocpf_timeout(void *ioc);
307void bfa_nw_iocpf_sem_timeout(void *ioc);
308
309/*
310 * F/W Image Size & Chunk
311 */
312u32 *bfa_cb_image_get_chunk(int type, u32 off);
313u32 bfa_cb_image_get_size(int type);
314
315#endif /* __BFA_IOC_H__ */
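The flash chunk macros in bfa_ioc.h (BFA_IOC_FLASH_CHUNK_NO, BFA_IOC_FLASH_OFFSET_IN_CHUNK, BFA_IOC_FLASH_CHUNK_ADDR) convert a word offset into the firmware image into a chunk number, an offset inside that chunk, and the chunk's starting word address; BFI_FLASH_CHUNK_SZ_WORDS works out to 64 u32 words (256 bytes). The following is a minimal sketch of how a download loop might consume them together with the bfa_cb_image_* hooks declared above; the image type, the size unit and the loop body are placeholders for illustration, not the driver's actual download path.

    /* Sketch only: walk a firmware image one 32-bit word at a time. */
    u32 sz = bfa_cb_image_get_size(BFI_IMAGE_CT_CNA);   /* size in words (assumed) */
    u32 off, *chunk = NULL;

    for (off = 0; off < sz; off++) {
            if (BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) == 0)   /* crossed a 64-word boundary */
                    chunk = bfa_cb_image_get_chunk(BFI_IMAGE_CT_CNA,
                                    BFA_IOC_FLASH_CHUNK_ADDR(BFA_IOC_FLASH_CHUNK_NO(off)));
            /* chunk[BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)] is the next word to copy */
    }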
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
new file mode 100644
index 000000000000..209f1f320343
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -0,0 +1,512 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_ioc.h"
20#include "cna.h"
21#include "bfi.h"
22#include "bfi_reg.h"
23#include "bfa_defs.h"
24
25#define bfa_ioc_ct_sync_pos(__ioc) \
26 ((u32) (1 << bfa_ioc_pcifn(__ioc)))
27#define BFA_IOC_SYNC_REQD_SH 16
28#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
29#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
30#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
31#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
32 (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
33
34/*
35 * forward declarations
36 */
37static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
38static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
39static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
40static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
42static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
44static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
45static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
46static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
47static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
48static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
49static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
50
51static struct bfa_ioc_hwif nw_hwif_ct;
52
53static void
54bfa_ioc_set_ctx_hwif(struct bfa_ioc *ioc, struct bfa_ioc_hwif *hwif)
55{
56 hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
57 hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
58 hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
59 hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
60 hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
61 hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
62 hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
63 hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
64 hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
65}
66
67/**
68 * Called from bfa_ioc_attach() to map asic specific calls.
69 */
70void
71bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
72{
73 bfa_ioc_set_ctx_hwif(ioc, &nw_hwif_ct);
74
75 nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
76 nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
77 nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
78 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
79 ioc->ioc_hwif = &nw_hwif_ct;
80}
81
82/**
83 * Return true if firmware of current driver matches the running firmware.
84 */
85static bool
86bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
87{
88 enum bfi_ioc_state ioc_fwstate;
89 u32 usecnt;
90 struct bfi_ioc_image_hdr fwhdr;
91
92 /**
93 * If bios boot (flash based) -- do not increment usage count
94 */
95 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
96 BFA_IOC_FWIMG_MINSZ)
97 return true;
98
99 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
100 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
101
102 /**
103 * If usage count is 0, always return TRUE.
104 */
105 if (usecnt == 0) {
106 writel(1, ioc->ioc_regs.ioc_usage_reg);
107 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
108 writel(0, ioc->ioc_regs.ioc_fail_sync);
109 return true;
110 }
111
112 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
113
114 /**
115 * Use count cannot be non-zero and chip in uninitialized state.
116 */
117	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);
118
119 /**
120 * Check if another driver with a different firmware is active
121 */
122 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
123 if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
124 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
125 return false;
126 }
127
128 /**
129 * Same firmware version. Increment the reference count.
130 */
131 usecnt++;
132 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
133 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
134 return true;
135}
136
137static void
138bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
139{
140 u32 usecnt;
141
142 /**
143 * If bios boot (flash based) -- do not decrement usage count
144 */
145 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
146 BFA_IOC_FWIMG_MINSZ)
147 return;
148
149 /**
150 * decrement usage count
151 */
152 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
153 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
154	BUG_ON(usecnt == 0);
155
156 usecnt--;
157 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
158
159 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
160}
161
162/**
163 * Notify other functions on HB failure.
164 */
165static void
166bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
167{
168 if (ioc->cna) {
169 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
170 writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
171 /* Wait for halt to take effect */
172 readl(ioc->ioc_regs.ll_halt);
173 readl(ioc->ioc_regs.alt_ll_halt);
174 } else {
175 writel(~0U, ioc->ioc_regs.err_set);
176 readl(ioc->ioc_regs.err_set);
177 }
178}
179
180/**
181 * Host to LPU mailbox message addresses
182 */
183static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
184 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
185 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
186 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
187 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
188};
189
190/**
191 * Host <-> LPU mailbox command/status registers - port 0
192 */
193static struct { u32 hfn, lpu; } ct_p0reg[] = {
194 { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
195 { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
196 { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
197 { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
198};
199
200/**
201 * Host <-> LPU mailbox command/status registers - port 1
202 */
203static struct { u32 hfn, lpu; } ct_p1reg[] = {
204 { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
205 { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
206 { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
207 { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
208};
209
210static void
211bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
212{
213 void __iomem *rb;
214 int pcifn = bfa_ioc_pcifn(ioc);
215
216 rb = bfa_ioc_bar0(ioc);
217
218 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
219 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
220 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
221
222 if (ioc->port_id == 0) {
223 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
224 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
225 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
226 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
227 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
228 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
229 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
230 } else {
231 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
232 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
233 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
234 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
235 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
236 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
237 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
238 }
239
240 /*
241 * PSS control registers
242 */
243 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
244 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
245 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
246 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
247
248 /*
249 * IOC semaphore registers and serialization
250 */
251 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
252 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
253 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
254 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
255 ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
256
257 /**
258 * sram memory access
259 */
260 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
261 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
262
263 /*
264 * err set reg : for notification of hb failure in fcmode
265 */
266 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
267}
268
269/**
270 * Initialize IOC to port mapping.
271 */
272
273#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
274static void
275bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
276{
277 void __iomem *rb = ioc->pcidev.pci_bar_kva;
278 u32 r32;
279
280 /**
281 * For catapult, base port id on personality register and IOC type
282 */
283 r32 = readl(rb + FNC_PERS_REG);
284 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
285 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
286
287}
288
289/**
290 * Set interrupt mode for a function: INTX or MSIX
291 */
292static void
293bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
294{
295 void __iomem *rb = ioc->pcidev.pci_bar_kva;
296 u32 r32, mode;
297
298 r32 = readl(rb + FNC_PERS_REG);
299
300 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
301 __F0_INTX_STATUS;
302
303 /**
304 * If already in desired mode, do not change anything
305 */
306 if ((!msix && mode) || (msix && !mode))
307 return;
308
309 if (msix)
310 mode = __F0_INTX_STATUS_MSIX;
311 else
312 mode = __F0_INTX_STATUS_INTA;
313
314 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
315 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
316
317 writel(r32, rb + FNC_PERS_REG);
318}
319
320/**
321 * Cleanup hw semaphore and usecnt registers
322 */
323static void
324bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
325{
326 if (ioc->cna) {
327 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
328 writel(0, ioc->ioc_regs.ioc_usage_reg);
329 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
330 }
331
332 /*
333 * Read the hw sem reg to make sure that it is locked
334 * before we clear it. If it is not locked, writing 1
335 * will lock it instead of clearing it.
336 */
337 readl(ioc->ioc_regs.ioc_sem_reg);
338 bfa_nw_ioc_hw_sem_release(ioc);
339}
340
341/**
342 * Synchronized IOC failure processing routines
343 */
344static bool
345bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
346{
347 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
348 u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
349
350 /*
351 * Driver load time. If the sync required bit for this PCI fn
352 * is set, it is due to an unclean exit by the driver for this
353 * PCI fn in the previous incarnation. Whoever comes here first
354 * should clean it up, no matter which PCI fn.
355 */
356
357 if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
358 writel(0, ioc->ioc_regs.ioc_fail_sync);
359 writel(1, ioc->ioc_regs.ioc_usage_reg);
360 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
361 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
362 return true;
363 }
364
365 return bfa_ioc_ct_sync_complete(ioc);
366}
367/**
368 * Synchronized IOC failure processing routines
369 */
370static void
371bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
372{
373 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
374 u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
375
376 writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
377}
378
379static void
380bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
381{
382 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
383 u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
384 bfa_ioc_ct_sync_pos(ioc);
385
386 writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
387}
388
389static void
390bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
391{
392 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
393
394 writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
395}
396
397static bool
398bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
399{
400 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
401 u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
402 u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
403 u32 tmp_ackd;
404
405 if (sync_ackd == 0)
406 return true;
407
408 /**
409 * The check below is to see whether any other PCI fn
410 * has reinitialized the ASIC (reset sync_ackd bits)
411 * and failed again while this IOC was waiting for hw
412 * semaphore (in bfa_iocpf_sm_semwait()).
413 */
414 tmp_ackd = sync_ackd;
415 if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
416 !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
417 sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
418
419 if (sync_reqd == sync_ackd) {
420 writel(bfa_ioc_ct_clear_sync_ackd(r32),
421 ioc->ioc_regs.ioc_fail_sync);
422 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
423 writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
424 return true;
425 }
426
427 /**
428 * If another PCI fn reinitialized and failed again while
429 * this IOC was waiting for hw sem, the sync_ackd bit for
430 * this IOC needs to be set again to allow reinitialization.
431 */
432 if (tmp_ackd != sync_ackd)
433 writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
434
435 return false;
436}
437
438static enum bfa_status
439bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
440{
441 u32 pll_sclk, pll_fclk, r32;
442
443 pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
444 __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
445 __APP_PLL_SCLK_JITLMT0_1(3U) |
446 __APP_PLL_SCLK_CNTLMT0_1(1U);
447 pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
448 __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
449 __APP_PLL_LCLK_JITLMT0_1(3U) |
450 __APP_PLL_LCLK_CNTLMT0_1(1U);
451
452 if (fcmode) {
453 writel(0, (rb + OP_MODE));
454 writel(__APP_EMS_CMLCKSEL |
455 __APP_EMS_REFCKBUFEN2 |
456 __APP_EMS_CHANNEL_SEL,
457 (rb + ETH_MAC_SER_REG));
458 } else {
459 writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
460 writel(__APP_EMS_REFCKBUFEN1,
461 (rb + ETH_MAC_SER_REG));
462 }
463 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
464 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
465 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
466 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
467 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
468 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
469 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
470 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
471 writel(pll_sclk |
472 __APP_PLL_SCLK_LOGIC_SOFT_RESET,
473 rb + APP_PLL_SCLK_CTL_REG);
474 writel(pll_fclk |
475 __APP_PLL_LCLK_LOGIC_SOFT_RESET,
476 rb + APP_PLL_LCLK_CTL_REG);
477 writel(pll_sclk |
478 __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
479 rb + APP_PLL_SCLK_CTL_REG);
480 writel(pll_fclk |
481 __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
482 rb + APP_PLL_LCLK_CTL_REG);
483 readl(rb + HOSTFN0_INT_MSK);
484 udelay(2000);
485 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
486 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
487 writel(pll_sclk |
488 __APP_PLL_SCLK_ENABLE,
489 rb + APP_PLL_SCLK_CTL_REG);
490 writel(pll_fclk |
491 __APP_PLL_LCLK_ENABLE,
492 rb + APP_PLL_LCLK_CTL_REG);
493
494 if (!fcmode) {
495 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
496 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
497 }
498 r32 = readl((rb + PSS_CTL_REG));
499 r32 &= ~__PSS_LMEM_RESET;
500 writel(r32, (rb + PSS_CTL_REG));
501 udelay(1000);
502 if (!fcmode) {
503 writel(0, (rb + PMM_1T_RESET_REG_P0));
504 writel(0, (rb + PMM_1T_RESET_REG_P1));
505 }
506
507 writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
508 udelay(1000);
509 r32 = readl((rb + MBIST_STAT_REG));
510 writel(0, (rb + MBIST_CTL_REG));
511 return BFA_STATUS_OK;
512}
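All five sync handlers above operate on a single scratch register, ioc_fail_sync: the low 16 bits carry one "acked" flag per PCI function and the high 16 bits one "required" flag per function, as encoded by the BFA_IOC_SYNC_REQD_SH macros at the top of the file. A small worked example follows, assuming bfa_ioc_pcifn(ioc) == 2 purely for illustration.

    /* ioc_fail_sync layout (illustrative values for PCI function 2):
     *   bits 31..16  sync_reqd - functions that joined the sync protocol
     *   bits 15..0   sync_ackd - functions that acknowledged the failure
     */
    u32 pos      = bfa_ioc_ct_sync_pos(ioc);        /* 1 << 2    == 0x00000004 */
    u32 reqd_pos = bfa_ioc_ct_sync_reqd_pos(ioc);   /* pos << 16 == 0x00040000 */
    u32 r32      = reqd_pos | pos;                  /* fn2 joined and acked    */

    /* sync is complete when every function that joined has also acked */
    bool done = (bfa_ioc_ct_get_sync_reqd(r32) == bfa_ioc_ct_get_sync_ackd(r32));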
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
new file mode 100644
index 000000000000..088211c2724f
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -0,0 +1,400 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFI_H__
20#define __BFI_H__
21
22#include "bfa_defs.h"
23
24#pragma pack(1)
25
26/**
27 * BFI FW image type
28 */
29#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
30#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
31enum {
32 BFI_IMAGE_CB_FC,
33 BFI_IMAGE_CT_FC,
34 BFI_IMAGE_CT_CNA,
35 BFI_IMAGE_MAX,
36};
37
38/**
39 * Msg header common to all msgs
40 */
41struct bfi_mhdr {
42 u8 msg_class; /*!< @ref enum bfi_mclass */
43 u8 msg_id; /*!< msg opcode with in the class */
44 union {
45 struct {
46 u8 rsvd;
47 u8 lpu_id; /*!< msg destination */
48 } h2i;
49 u16 i2htok; /*!< token in msgs to host */
50 } mtag;
51};
52
53#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
54 (_mh).msg_class = (_mc); \
55 (_mh).msg_id = (_op); \
56 (_mh).mtag.h2i.lpu_id = (_lpuid); \
57} while (0)
58
59#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
60 (_mh).msg_class = (_mc); \
61 (_mh).msg_id = (_op); \
62 (_mh).mtag.i2htok = (_i2htok); \
63} while (0)
64
65/*
66 * Message opcodes: 0-127 to firmware, 128-255 to host
67 */
68#define BFI_I2H_OPCODE_BASE 128
69#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
70
71/**
72 ****************************************************************************
73 *
74 * Scatter Gather Element and Page definition
75 *
76 ****************************************************************************
77 */
78
79#define BFI_SGE_INLINE 1
80#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
81
82/**
83 * SG Flags
84 */
85enum {
86 BFI_SGE_DATA = 0, /*!< data address, not last */
87 BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */
88 BFI_SGE_DATA_LAST = 3, /*!< data address, last */
89 BFI_SGE_LINK = 2, /*!< link address */
90 BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */
91};
92
93/**
94 * DMA addresses
95 */
96union bfi_addr_u {
97 struct {
98 u32 addr_lo;
99 u32 addr_hi;
100 } a32;
101};
102
103/**
104 * Scatter Gather Element
105 */
106struct bfi_sge {
107#ifdef __BIGENDIAN
108 u32 flags:2,
109 rsvd:2,
110 sg_len:28;
111#else
112 u32 sg_len:28,
113 rsvd:2,
114 flags:2;
115#endif
116 union bfi_addr_u sga;
117};
118
119/**
120 * Scatter Gather Page
121 */
122#define BFI_SGPG_DATA_SGES 7
123#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
124#define BFI_SGPG_RSVD_WD_LEN 8
125struct bfi_sgpg {
126 struct bfi_sge sges[BFI_SGPG_SGES_MAX];
127 u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
128};
129
130/*
131 * Large Message structure - 128 Bytes size Msgs
132 */
133#define BFI_LMSG_SZ 128
134#define BFI_LMSG_PL_WSZ \
135 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
136
137struct bfi_msg {
138 struct bfi_mhdr mhdr;
139 u32 pl[BFI_LMSG_PL_WSZ];
140};
141
142/**
143 * Mailbox message structure
144 */
145#define BFI_MBMSG_SZ 7
146struct bfi_mbmsg {
147 struct bfi_mhdr mh;
148 u32 pl[BFI_MBMSG_SZ];
149};
150
151/**
152 * Message Classes
153 */
154enum bfi_mclass {
155 BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
156 BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
157 BFI_MC_FLASH = 3, /*!< Flash message class */
158 BFI_MC_CEE = 4, /*!< CEE */
159 BFI_MC_FCPORT = 5, /*!< FC port */
160 BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */
161 BFI_MC_LL = 7, /*!< Link Layer */
162 BFI_MC_UF = 8, /*!< Unsolicited frame receive */
163 BFI_MC_FCXP = 9, /*!< FC Transport */
164 BFI_MC_LPS = 10, /*!< lport fc login services */
165 BFI_MC_RPORT = 11, /*!< Remote port */
166 BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */
167 BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */
168 BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */
169 BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */
170 BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */
171 BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */
172 BFI_MC_TSKIM = 18, /*!< Initiator Task management */
173 BFI_MC_SBOOT = 19, /*!< SAN boot services */
174 BFI_MC_IPFC = 20, /*!< IP over FC Msgs */
175 BFI_MC_PORT = 21, /*!< Physical port */
176 BFI_MC_SFP = 22, /*!< SFP module */
177 BFI_MC_MSGQ = 23, /*!< MSGQ */
178 BFI_MC_ENET = 24, /*!< ENET commands/responses */
179 BFI_MC_MAX = 32
180};
181
182#define BFI_IOC_MAX_CQS 4
183#define BFI_IOC_MAX_CQS_ASIC 8
184#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
185
186#define BFI_BOOT_TYPE_OFF 8
187#define BFI_BOOT_LOADER_OFF 12
188
189#define BFI_BOOT_TYPE_NORMAL 0
190#define BFI_BOOT_TYPE_FLASH 1
191#define BFI_BOOT_TYPE_MEMTEST 2
192
193#define BFI_BOOT_LOADER_OS 0
194
195#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
196#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
197
198/**
199 *----------------------------------------------------------------------
200 * IOC
201 *----------------------------------------------------------------------
202 */
203
204enum bfi_ioc_h2i_msgs {
205 BFI_IOC_H2I_ENABLE_REQ = 1,
206 BFI_IOC_H2I_DISABLE_REQ = 2,
207 BFI_IOC_H2I_GETATTR_REQ = 3,
208 BFI_IOC_H2I_DBG_SYNC = 4,
209 BFI_IOC_H2I_DBG_DUMP = 5,
210};
211
212enum bfi_ioc_i2h_msgs {
213 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
214 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
215 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
216 BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
217 BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
218};
219
220/**
221 * BFI_IOC_H2I_GETATTR_REQ message
222 */
223struct bfi_ioc_getattr_req {
224 struct bfi_mhdr mh;
225 union bfi_addr_u attr_addr;
226};
227
228struct bfi_ioc_attr {
229 u64 mfg_pwwn; /*!< Mfg port wwn */
230 u64 mfg_nwwn; /*!< Mfg node wwn */
231 mac_t mfg_mac; /*!< Mfg mac */
232 u16 rsvd_a;
233 u64 pwwn;
234 u64 nwwn;
235 mac_t mac; /*!< PBC or Mfg mac */
236 u16 rsvd_b;
237 mac_t fcoe_mac;
238 u16 rsvd_c;
239 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
240 u8 pcie_gen;
241 u8 pcie_lanes_orig;
242 u8 pcie_lanes;
243 u8 rx_bbcredit; /*!< receive buffer credits */
244 u32 adapter_prop; /*!< adapter properties */
245 u16 maxfrsize; /*!< max receive frame size */
246 char asic_rev;
247 u8 rsvd_d;
248 char fw_version[BFA_VERSION_LEN];
249 char optrom_version[BFA_VERSION_LEN];
250 struct bfa_mfg_vpd vpd;
251 u32 card_type; /*!< card type */
252};
253
254/**
255 * BFI_IOC_I2H_GETATTR_REPLY message
256 */
257struct bfi_ioc_getattr_reply {
258 struct bfi_mhdr mh; /*!< Common msg header */
259 u8 status; /*!< cfg reply status */
260 u8 rsvd[3];
261};
262
263/**
264 * Firmware memory page offsets
265 */
266#define BFI_IOC_SMEM_PG0_CB (0x40)
267#define BFI_IOC_SMEM_PG0_CT (0x180)
268
269/**
270 * Firmware statistic offset
271 */
272#define BFI_IOC_FWSTATS_OFF (0x6B40)
273#define BFI_IOC_FWSTATS_SZ (4096)
274
275/**
276 * Firmware trace offset
277 */
278#define BFI_IOC_TRC_OFF (0x4b00)
279#define BFI_IOC_TRC_ENTS 256
280
281#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
282#define BFI_IOC_MD5SUM_SZ 4
283struct bfi_ioc_image_hdr {
284 u32 signature; /*!< constant signature */
285 u32 rsvd_a;
286 u32 exec; /*!< exec vector */
287 u32 param; /*!< parameters */
288 u32 rsvd_b[4];
289 u32 md5sum[BFI_IOC_MD5SUM_SZ];
290};
291
292enum bfi_fwboot_type {
293 BFI_FWBOOT_TYPE_NORMAL = 0,
294 BFI_FWBOOT_TYPE_FLASH = 1,
295 BFI_FWBOOT_TYPE_MEMTEST = 2,
296};
297
298/**
299 * BFI_IOC_I2H_READY_EVENT message
300 */
301struct bfi_ioc_rdy_event {
302 struct bfi_mhdr mh; /*!< common msg header */
303 u8 init_status; /*!< init event status */
304 u8 rsvd[3];
305};
306
307struct bfi_ioc_hbeat {
308 struct bfi_mhdr mh; /*!< common msg header */
309 u32 hb_count; /*!< current heart beat count */
310};
311
312/**
313 * IOC hardware/firmware state
314 */
315enum bfi_ioc_state {
316 BFI_IOC_UNINIT = 0, /*!< not initialized */
317 BFI_IOC_INITING = 1, /*!< h/w is being initialized */
318 BFI_IOC_HWINIT = 2, /*!< h/w is initialized */
319 BFI_IOC_CFG = 3, /*!< IOC configuration in progress */
320 BFI_IOC_OP = 4, /*!< IOC is operational */
321 BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */
322 BFI_IOC_DISABLED = 6, /*!< IOC is disabled */
323	BFI_IOC_CFG_DISABLED = 7,	/*!< IOC is being disabled; transient */
324 BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */
325 BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
326};
327
328#define BFI_IOC_ENDIAN_SIG 0x12345678
329
330enum {
331 BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
332 BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
333 BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */
334 BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */
335 BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */
336 BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */
337 BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */
338	BFI_ADAPTER_PROTO	= 0x100000,	 /*!< prototype adapters */
339 BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */
340 BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */
341};
342
343#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
344 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
345 BFI_ADAPTER_ ## __prop ## _SH)
346#define BFI_ADAPTER_SETP(__prop, __val) \
347 ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
348#define BFI_ADAPTER_IS_PROTO(__adap_type) \
349 ((__adap_type) & BFI_ADAPTER_PROTO)
350#define BFI_ADAPTER_IS_TTV(__adap_type) \
351 ((__adap_type) & BFI_ADAPTER_TTV)
352#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
353 ((__adap_type) & BFI_ADAPTER_UNSUPP)
354#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
355 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
356 BFI_ADAPTER_UNSUPP))
357
358/**
359 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
360 */
361struct bfi_ioc_ctrl_req {
362 struct bfi_mhdr mh;
363 u8 ioc_class;
364 u8 rsvd[3];
365 u32 tv_sec;
366};
367
368/**
369 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
370 */
371struct bfi_ioc_ctrl_reply {
372 struct bfi_mhdr mh; /*!< Common msg header */
373 u8 status; /*!< enable/disable status */
374 u8 rsvd[3];
375};
376
377#define BFI_IOC_MSGSZ 8
378/**
379 * H2I Messages
380 */
381union bfi_ioc_h2i_msg_u {
382 struct bfi_mhdr mh;
383 struct bfi_ioc_ctrl_req enable_req;
384 struct bfi_ioc_ctrl_req disable_req;
385 struct bfi_ioc_getattr_req getattr_req;
386 u32 mboxmsg[BFI_IOC_MSGSZ];
387};
388
389/**
390 * I2H Messages
391 */
392union bfi_ioc_i2h_msg_u {
393 struct bfi_mhdr mh;
394 struct bfi_ioc_rdy_event rdy_event;
395 u32 mboxmsg[BFI_IOC_MSGSZ];
396};
397
398#pragma pack()
399
400#endif /* __BFI_H__ */
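Two helpers in this header do most of the work for callers: bfi_h2i_set() stamps the common header of a host-to-firmware message, and BFI_ADAPTER_SETP()/BFI_ADAPTER_GETP() pack and unpack fields of the adapter_prop word. A short sketch follows; the enable request and LPU id 0 are placeholders rather than the driver's actual values.

    struct bfi_ioc_ctrl_req req;

    /* class BFI_MC_IOC, opcode BFI_IOC_H2I_ENABLE_REQ, destination LPU 0 */
    bfi_h2i_set(req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 0);

    /* adapter_prop packs speed in bits 7..0 and port count in bits 15..8 */
    u32 prop   = BFI_ADAPTER_SETP(NPORTS, 2) | BFI_ADAPTER_SETP(SPEED, 10);
    u32 nports = BFI_ADAPTER_GETP(NPORTS, prop);    /* 2  */
    u32 speed  = BFI_ADAPTER_GETP(SPEED, prop);     /* 10 */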
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
new file mode 100644
index 000000000000..4eecabea397b
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -0,0 +1,199 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFI_CNA_H__
19#define __BFI_CNA_H__
20
21#include "bfi.h"
22#include "bfa_defs_cna.h"
23
24#pragma pack(1)
25
26enum bfi_port_h2i {
27 BFI_PORT_H2I_ENABLE_REQ = (1),
28 BFI_PORT_H2I_DISABLE_REQ = (2),
29 BFI_PORT_H2I_GET_STATS_REQ = (3),
30 BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
31};
32
33enum bfi_port_i2h {
34 BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
35 BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
36 BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
37 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
38};
39
40/**
41 * Generic REQ type
42 */
43struct bfi_port_generic_req {
44 struct bfi_mhdr mh; /*!< msg header */
45 u32 msgtag; /*!< msgtag for reply */
46 u32 rsvd;
47};
48
49/**
50 * Generic RSP type
51 */
52struct bfi_port_generic_rsp {
53 struct bfi_mhdr mh; /*!< common msg header */
54 u8 status; /*!< port enable status */
55 u8 rsvd[3];
56 u32 msgtag; /*!< msgtag for reply */
57};
58
59/**
60 * @todo
61 * BFI_PORT_H2I_ENABLE_REQ
62 */
63
64/**
65 * @todo
66 * BFI_PORT_I2H_ENABLE_RSP
67 */
68
69/**
70 * BFI_PORT_H2I_DISABLE_REQ
71 */
72
73/**
74 * BFI_PORT_I2H_DISABLE_RSP
75 */
76
77/**
78 * BFI_PORT_H2I_GET_STATS_REQ
79 */
80struct bfi_port_get_stats_req {
81 struct bfi_mhdr mh; /*!< common msg header */
82 union bfi_addr_u dma_addr;
83};
84
85/**
86 * BFI_PORT_I2H_GET_STATS_RSP
87 */
88
89/**
90 * BFI_PORT_H2I_CLEAR_STATS_REQ
91 */
92
93/**
94 * BFI_PORT_I2H_CLEAR_STATS_RSP
95 */
96
97union bfi_port_h2i_msg_u {
98 struct bfi_mhdr mh;
99 struct bfi_port_generic_req enable_req;
100 struct bfi_port_generic_req disable_req;
101 struct bfi_port_get_stats_req getstats_req;
102 struct bfi_port_generic_req clearstats_req;
103};
104
105union bfi_port_i2h_msg_u {
106 struct bfi_mhdr mh;
107 struct bfi_port_generic_rsp enable_rsp;
108 struct bfi_port_generic_rsp disable_rsp;
109 struct bfi_port_generic_rsp getstats_rsp;
110 struct bfi_port_generic_rsp clearstats_rsp;
111};
112
113/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
114enum bfi_cee_h2i_msgs {
115 BFI_CEE_H2I_GET_CFG_REQ = 1,
116 BFI_CEE_H2I_RESET_STATS = 2,
117 BFI_CEE_H2I_GET_STATS_REQ = 3,
118};
119
120/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
121enum bfi_cee_i2h_msgs {
122 BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
123 BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
124 BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
125};
126
127/* Data structures */
128
129/*
130 * @brief H2I command structure for resetting the stats.
131 * BFI_CEE_H2I_RESET_STATS
132 */
133struct bfi_lldp_reset_stats {
134 struct bfi_mhdr mh;
135};
136
137/*
138 * @brief H2I command structure for resetting the stats.
139 * BFI_CEE_H2I_RESET_STATS
140 */
141struct bfi_cee_reset_stats {
142 struct bfi_mhdr mh;
143};
144
145/*
146 * @brief get configuration command from host
147 * BFI_CEE_H2I_GET_CFG_REQ
148 */
149struct bfi_cee_get_req {
150 struct bfi_mhdr mh;
151 union bfi_addr_u dma_addr;
152};
153
154/*
155 * @brief reply message from firmware
156 * BFI_CEE_I2H_GET_CFG_RSP
157 */
158struct bfi_cee_get_rsp {
159 struct bfi_mhdr mh;
160 u8 cmd_status;
161 u8 rsvd[3];
162};
163
164/*
165 * @brief get statistics command from host
166 * BFI_CEE_H2I_GET_STATS_REQ
167 */
168struct bfi_cee_stats_req {
169 struct bfi_mhdr mh;
170 union bfi_addr_u dma_addr;
171};
172
173/*
174 * @brief reply message from firmware
175 * BFI_CEE_I2H_GET_STATS_RSP
176 */
177struct bfi_cee_stats_rsp {
178 struct bfi_mhdr mh;
179 u8 cmd_status;
180 u8 rsvd[3];
181};
182
183/* @brief mailbox command structures from host to firmware */
184union bfi_cee_h2i_msg_u {
185 struct bfi_mhdr mh;
186 struct bfi_cee_get_req get_req;
187 struct bfi_cee_stats_req stats_req;
188};
189
190/* @brief mailbox message structures from firmware to host */
191union bfi_cee_i2h_msg_u {
192 struct bfi_mhdr mh;
193 struct bfi_cee_get_rsp get_rsp;
194 struct bfi_cee_stats_rsp stats_rsp;
195};
196
197#pragma pack()
198
199#endif /* __BFI_CNA_H__ */
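A CEE query is just the common header plus the big-endian DMA address of a host buffer that the firmware fills in; bfa_dma_be_addr_set() from bfa_ioc.h performs the byte-swap. The snippet below is a sketch, with the physical address and LPU id as placeholders and the DMA buffer allocation assumed to happen elsewhere.

    struct bfi_cee_get_req req;
    u64 dma_pa = 0;     /* placeholder: physical address of a DMA-coherent buffer */

    bfi_h2i_set(req.mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, 0);
    bfa_dma_be_addr_set(req.dma_addr, dma_pa);  /* fills addr_lo/addr_hi big-endian */
    /* the request is then queued through the IOC mailbox, e.g. bfa_nw_ioc_mbox_queue() */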
diff --git a/drivers/net/ethernet/brocade/bna/bfi_ll.h b/drivers/net/ethernet/brocade/bna/bfi_ll.h
new file mode 100644
index 000000000000..bee4d054066a
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi_ll.h
@@ -0,0 +1,438 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFI_LL_H__
19#define __BFI_LL_H__
20
21#include "bfi.h"
22
23#pragma pack(1)
24
25/**
26 * @brief
27 * "enums" for all LL mailbox messages other than IOC
28 */
29enum {
30 BFI_LL_H2I_MAC_UCAST_SET_REQ = 1,
31 BFI_LL_H2I_MAC_UCAST_ADD_REQ = 2,
32 BFI_LL_H2I_MAC_UCAST_DEL_REQ = 3,
33
34 BFI_LL_H2I_MAC_MCAST_ADD_REQ = 4,
35 BFI_LL_H2I_MAC_MCAST_DEL_REQ = 5,
36 BFI_LL_H2I_MAC_MCAST_FILTER_REQ = 6,
37 BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ = 7,
38
39 BFI_LL_H2I_PORT_ADMIN_REQ = 8,
40 BFI_LL_H2I_STATS_GET_REQ = 9,
41 BFI_LL_H2I_STATS_CLEAR_REQ = 10,
42
43 BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ = 11,
44 BFI_LL_H2I_RXF_DEFAULT_SET_REQ = 12,
45
46 BFI_LL_H2I_TXQ_STOP_REQ = 13,
47 BFI_LL_H2I_RXQ_STOP_REQ = 14,
48
49 BFI_LL_H2I_DIAG_LOOPBACK_REQ = 15,
50
51 BFI_LL_H2I_SET_PAUSE_REQ = 16,
52 BFI_LL_H2I_MTU_INFO_REQ = 17,
53
54 BFI_LL_H2I_RX_REQ = 18,
55} ;
56
57enum {
58 BFI_LL_I2H_MAC_UCAST_SET_RSP = BFA_I2HM(1),
59 BFI_LL_I2H_MAC_UCAST_ADD_RSP = BFA_I2HM(2),
60 BFI_LL_I2H_MAC_UCAST_DEL_RSP = BFA_I2HM(3),
61
62 BFI_LL_I2H_MAC_MCAST_ADD_RSP = BFA_I2HM(4),
63 BFI_LL_I2H_MAC_MCAST_DEL_RSP = BFA_I2HM(5),
64 BFI_LL_I2H_MAC_MCAST_FILTER_RSP = BFA_I2HM(6),
65 BFI_LL_I2H_MAC_MCAST_DEL_ALL_RSP = BFA_I2HM(7),
66
67 BFI_LL_I2H_PORT_ADMIN_RSP = BFA_I2HM(8),
68 BFI_LL_I2H_STATS_GET_RSP = BFA_I2HM(9),
69 BFI_LL_I2H_STATS_CLEAR_RSP = BFA_I2HM(10),
70
71 BFI_LL_I2H_RXF_PROMISCUOUS_SET_RSP = BFA_I2HM(11),
72 BFI_LL_I2H_RXF_DEFAULT_SET_RSP = BFA_I2HM(12),
73
74 BFI_LL_I2H_TXQ_STOP_RSP = BFA_I2HM(13),
75 BFI_LL_I2H_RXQ_STOP_RSP = BFA_I2HM(14),
76
77 BFI_LL_I2H_DIAG_LOOPBACK_RSP = BFA_I2HM(15),
78
79 BFI_LL_I2H_SET_PAUSE_RSP = BFA_I2HM(16),
80
81 BFI_LL_I2H_MTU_INFO_RSP = BFA_I2HM(17),
82 BFI_LL_I2H_RX_RSP = BFA_I2HM(18),
83
84 BFI_LL_I2H_LINK_DOWN_AEN = BFA_I2HM(19),
85 BFI_LL_I2H_LINK_UP_AEN = BFA_I2HM(20),
86
87 BFI_LL_I2H_PORT_ENABLE_AEN = BFA_I2HM(21),
88 BFI_LL_I2H_PORT_DISABLE_AEN = BFA_I2HM(22),
89} ;
90
91/**
92 * @brief bfi_ll_mac_addr_req is used by:
93 * BFI_LL_H2I_MAC_UCAST_SET_REQ
94 * BFI_LL_H2I_MAC_UCAST_ADD_REQ
95 * BFI_LL_H2I_MAC_UCAST_DEL_REQ
96 * BFI_LL_H2I_MAC_MCAST_ADD_REQ
97 * BFI_LL_H2I_MAC_MCAST_DEL_REQ
98 */
99struct bfi_ll_mac_addr_req {
100 struct bfi_mhdr mh; /*!< common msg header */
101 u8 rxf_id;
102 u8 rsvd1[3];
103 mac_t mac_addr;
104 u8 rsvd2[2];
105};
106
107/**
108 * @brief bfi_ll_mcast_filter_req is used by:
109 * BFI_LL_H2I_MAC_MCAST_FILTER_REQ
110 */
111struct bfi_ll_mcast_filter_req {
112 struct bfi_mhdr mh; /*!< common msg header */
113 u8 rxf_id;
114 u8 enable;
115 u8 rsvd[2];
116};
117
118/**
119 * @brief bfi_ll_mcast_del_all_req is used by:
120 * BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ
121 */
122struct bfi_ll_mcast_del_all_req {
123 struct bfi_mhdr mh; /*!< common msg header */
124 u8 rxf_id;
125 u8 rsvd[3];
126};
127
128/**
129 * @brief bfi_ll_q_stop_req is used by:
130 * BFI_LL_H2I_TXQ_STOP_REQ
131 * BFI_LL_H2I_RXQ_STOP_REQ
132 */
133struct bfi_ll_q_stop_req {
134 struct bfi_mhdr mh; /*!< common msg header */
135 u32 q_id_mask[2]; /* !< bit-mask for queue ids */
136};
137
138/**
139 * @brief bfi_ll_stats_req is used by:
140 *	BFI_LL_H2I_STATS_GET_REQ
141 *	BFI_LL_H2I_STATS_CLEAR_REQ
142 */
143struct bfi_ll_stats_req {
144 struct bfi_mhdr mh; /*!< common msg header */
145 u16 stats_mask; /* !< bit-mask for non-function statistics */
146 u8 rsvd[2];
147 u32 rxf_id_mask[2]; /* !< bit-mask for RxF Statistics */
148 u32 txf_id_mask[2]; /* !< bit-mask for TxF Statistics */
149 union bfi_addr_u host_buffer; /* !< where statistics are returned */
150};
151
152/**
153 * @brief defines for "stats_mask" above.
154 */
155#define BFI_LL_STATS_MAC (1 << 0) /* !< MAC Statistics */
156#define BFI_LL_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
157#define BFI_LL_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
158#define BFI_LL_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
159#define BFI_LL_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
160
161#define BFI_LL_STATS_ALL 0x1f
162
163/**
164 * @brief bfi_ll_port_admin_req
165 */
166struct bfi_ll_port_admin_req {
167 struct bfi_mhdr mh; /*!< common msg header */
168 u8 up;
169 u8 rsvd[3];
170};
171
172/**
173 * @brief bfi_ll_rxf_req is used by:
174 * BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
175 * BFI_LL_H2I_RXF_DEFAULT_SET_REQ
176 */
177struct bfi_ll_rxf_req {
178 struct bfi_mhdr mh; /*!< common msg header */
179 u8 rxf_id;
180 u8 enable;
181 u8 rsvd[2];
182};
183
184/**
185 * @brief bfi_ll_rxf_multi_req is used by:
186 * BFI_LL_H2I_RX_REQ
187 */
188struct bfi_ll_rxf_multi_req {
189 struct bfi_mhdr mh; /*!< common msg header */
190 u32 rxf_id_mask[2];
191 u8 enable;
192 u8 rsvd[3];
193};
194
195/**
196 * @brief enum for Loopback opmodes
197 */
198enum {
199 BFI_LL_DIAG_LB_OPMODE_EXT = 0,
200 BFI_LL_DIAG_LB_OPMODE_CBL = 1,
201};
202
203/**
204 * @brief bfi_ll_set_pause_req is used by:
205 * BFI_LL_H2I_SET_PAUSE_REQ
206 */
207struct bfi_ll_set_pause_req {
208 struct bfi_mhdr mh;
209 u8 tx_pause; /* 1 = enable, 0 = disable */
210 u8 rx_pause; /* 1 = enable, 0 = disable */
211 u8 rsvd[2];
212};
213
214/**
215 * @brief bfi_ll_mtu_info_req is used by:
216 * BFI_LL_H2I_MTU_INFO_REQ
217 */
218struct bfi_ll_mtu_info_req {
219 struct bfi_mhdr mh;
220 u16 mtu;
221 u8 rsvd[2];
222};
223
224/**
225 * @brief
226 * Response header format used by all responses
227 * For both responses and asynchronous notifications
228 */
229struct bfi_ll_rsp {
230 struct bfi_mhdr mh; /*!< common msg header */
231 u8 error;
232 u8 rsvd[3];
233};
234
235/**
236 * @brief bfi_ll_cee_aen is used by:
237 * BFI_LL_I2H_LINK_DOWN_AEN
238 * BFI_LL_I2H_LINK_UP_AEN
239 */
240struct bfi_ll_aen {
241 struct bfi_mhdr mh; /*!< common msg header */
242 u32 reason;
243 u8 cee_linkup;
244 u8 prio_map; /*!< LL priority bit-map */
245 u8 rsvd[2];
246};
247
248/**
249 * @brief
250 * The following error codes can be returned
251 * by the mbox commands
252 */
253enum {
254 BFI_LL_CMD_OK = 0,
255 BFI_LL_CMD_FAIL = 1,
256 BFI_LL_CMD_DUP_ENTRY = 2, /* !< Duplicate entry in CAM */
257 BFI_LL_CMD_CAM_FULL = 3, /* !< CAM is full */
258	BFI_LL_CMD_NOT_OWNER		= 4,	/* !< Not permitted because not owner */
259 BFI_LL_CMD_NOT_EXEC = 5, /* !< Was not sent to f/w at all */
260 BFI_LL_CMD_WAITING = 6, /* !< Waiting for completion (VMware) */
261 BFI_LL_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
262} ;
263
264/* Statistics */
265#define BFI_LL_TXF_ID_MAX 64
266#define BFI_LL_RXF_ID_MAX 64
267
268/* TxF Frame Statistics */
269struct bfi_ll_stats_txf {
270 u64 ucast_octets;
271 u64 ucast;
272 u64 ucast_vlan;
273
274 u64 mcast_octets;
275 u64 mcast;
276 u64 mcast_vlan;
277
278 u64 bcast_octets;
279 u64 bcast;
280 u64 bcast_vlan;
281
282 u64 errors;
283 u64 filter_vlan; /* frames filtered due to VLAN */
284 u64 filter_mac_sa; /* frames filtered due to SA check */
285};
286
287/* RxF Frame Statistics */
288struct bfi_ll_stats_rxf {
289 u64 ucast_octets;
290 u64 ucast;
291 u64 ucast_vlan;
292
293 u64 mcast_octets;
294 u64 mcast;
295 u64 mcast_vlan;
296
297 u64 bcast_octets;
298 u64 bcast;
299 u64 bcast_vlan;
300 u64 frame_drops;
301};
302
303/* FC Tx Frame Statistics */
304struct bfi_ll_stats_fc_tx {
305 u64 txf_ucast_octets;
306 u64 txf_ucast;
307 u64 txf_ucast_vlan;
308
309 u64 txf_mcast_octets;
310 u64 txf_mcast;
311 u64 txf_mcast_vlan;
312
313 u64 txf_bcast_octets;
314 u64 txf_bcast;
315 u64 txf_bcast_vlan;
316
317 u64 txf_parity_errors;
318 u64 txf_timeout;
319 u64 txf_fid_parity_errors;
320};
321
322/* FC Rx Frame Statistics */
323struct bfi_ll_stats_fc_rx {
324 u64 rxf_ucast_octets;
325 u64 rxf_ucast;
326 u64 rxf_ucast_vlan;
327
328 u64 rxf_mcast_octets;
329 u64 rxf_mcast;
330 u64 rxf_mcast_vlan;
331
332 u64 rxf_bcast_octets;
333 u64 rxf_bcast;
334 u64 rxf_bcast_vlan;
335};
336
337/* RAD Frame Statistics */
338struct bfi_ll_stats_rad {
339 u64 rx_frames;
340 u64 rx_octets;
341 u64 rx_vlan_frames;
342
343 u64 rx_ucast;
344 u64 rx_ucast_octets;
345 u64 rx_ucast_vlan;
346
347 u64 rx_mcast;
348 u64 rx_mcast_octets;
349 u64 rx_mcast_vlan;
350
351 u64 rx_bcast;
352 u64 rx_bcast_octets;
353 u64 rx_bcast_vlan;
354
355 u64 rx_drops;
356};
357
358/* BPC Tx Registers */
359struct bfi_ll_stats_bpc {
360 /* transmit stats */
361 u64 tx_pause[8];
362 u64 tx_zero_pause[8]; /*!< Pause cancellation */
363 /*!<Pause initiation rather than retention */
364 u64 tx_first_pause[8];
365
366 /* receive stats */
367 u64 rx_pause[8];
368 u64 rx_zero_pause[8]; /*!< Pause cancellation */
369 /*!<Pause initiation rather than retention */
370 u64 rx_first_pause[8];
371};
372
373/* MAC Rx Statistics */
374struct bfi_ll_stats_mac {
375 u64 frame_64; /* both rx and tx counter */
376 u64 frame_65_127; /* both rx and tx counter */
377 u64 frame_128_255; /* both rx and tx counter */
378 u64 frame_256_511; /* both rx and tx counter */
379 u64 frame_512_1023; /* both rx and tx counter */
380 u64 frame_1024_1518; /* both rx and tx counter */
381 u64 frame_1519_1522; /* both rx and tx counter */
382
383 /* receive stats */
384 u64 rx_bytes;
385 u64 rx_packets;
386 u64 rx_fcs_error;
387 u64 rx_multicast;
388 u64 rx_broadcast;
389 u64 rx_control_frames;
390 u64 rx_pause;
391 u64 rx_unknown_opcode;
392 u64 rx_alignment_error;
393 u64 rx_frame_length_error;
394 u64 rx_code_error;
395 u64 rx_carrier_sense_error;
396 u64 rx_undersize;
397 u64 rx_oversize;
398 u64 rx_fragments;
399 u64 rx_jabber;
400 u64 rx_drop;
401
402 /* transmit stats */
403 u64 tx_bytes;
404 u64 tx_packets;
405 u64 tx_multicast;
406 u64 tx_broadcast;
407 u64 tx_pause;
408 u64 tx_deferral;
409 u64 tx_excessive_deferral;
410 u64 tx_single_collision;
411 u64 tx_muliple_collision;
412 u64 tx_late_collision;
413 u64 tx_excessive_collision;
414 u64 tx_total_collision;
415 u64 tx_pause_honored;
416 u64 tx_drop;
417 u64 tx_jabber;
418 u64 tx_fcs_error;
419 u64 tx_control_frame;
420 u64 tx_oversize;
421 u64 tx_undersize;
422 u64 tx_fragments;
423};
424
425/* Complete statistics */
426struct bfi_ll_stats {
427 struct bfi_ll_stats_mac mac_stats;
428 struct bfi_ll_stats_bpc bpc_stats;
429 struct bfi_ll_stats_rad rad_stats;
430 struct bfi_ll_stats_fc_rx fc_rx_stats;
431 struct bfi_ll_stats_fc_tx fc_tx_stats;
432 struct bfi_ll_stats_rxf rxf_stats[BFI_LL_RXF_ID_MAX];
433 struct bfi_ll_stats_txf txf_stats[BFI_LL_TXF_ID_MAX];
434};
435
436#pragma pack()
437
438#endif /* __BFI_LL_H__ */
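The stats_mask field of bfi_ll_stats_req selects which of the non-function blocks above (MAC, BPC, RAD, FC Rx, FC Tx) the firmware should DMA back; the five BFI_LL_STATS_* bits OR together to BFI_LL_STATS_ALL (0x1f). Below is a sketch of a full-statistics request; the byte-swapping convention, the single-RxF/TxF masks and the DMA buffer are assumptions for illustration only.

    struct bfi_ll_stats_req req;

    memset(&req, 0, sizeof(req));
    bfi_h2i_set(req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
    req.stats_mask     = htons(BFI_LL_STATS_ALL);   /* MAC | BPC | RAD | RX_FC | TX_FC */
    req.rxf_id_mask[0] = htonl(1 << 0);             /* RxF 0 only (placeholder)        */
    req.txf_id_mask[0] = htonl(1 << 0);             /* TxF 0 only (placeholder)        */
    /* req.host_buffer receives the DMA address of a struct bfi_ll_stats buffer */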
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
new file mode 100644
index 000000000000..efacff3ab51d
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -0,0 +1,452 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/*
20 * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
21 */
22
23#ifndef __BFI_REG_H__
24#define __BFI_REG_H__
25
26#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */
27#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */
28#define HOSTFN2_INT_STATUS 0x00014300 /* ct */
29#define HOSTFN3_INT_STATUS 0x00014400 /* ct */
30#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */
31#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */
32#define HOSTFN2_INT_MSK 0x00014304 /* ct */
33#define HOSTFN3_INT_MSK 0x00014404 /* ct */
34
35#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */
36#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */
37#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */
38#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */
39
40#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */
41#define __P_LCLK_PLL_LOCK 0x80000000
42#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
43#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000
44#define __APP_PLL_LCLK_RESET_TIMER_SH 17
45#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
46#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000
47#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000
48#define __APP_PLL_LCLK_CNTLMT0_1_SH 14
49#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
50#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000
51#define __APP_PLL_LCLK_JITLMT0_1_SH 12
52#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
53#define __APP_PLL_LCLK_HREF 0x00000800
54#define __APP_PLL_LCLK_HDIV 0x00000400
55#define __APP_PLL_LCLK_P0_1_MK 0x00000300
56#define __APP_PLL_LCLK_P0_1_SH 8
57#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH)
58#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0
59#define __APP_PLL_LCLK_Z0_2_SH 5
60#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH)
61#define __APP_PLL_LCLK_RSEL200500 0x00000010
62#define __APP_PLL_LCLK_ENARST 0x00000008
63#define __APP_PLL_LCLK_BYPASS 0x00000004
64#define __APP_PLL_LCLK_LRESETN 0x00000002
65#define __APP_PLL_LCLK_ENABLE 0x00000001
66#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */
67#define __P_SCLK_PLL_LOCK 0x80000000
68#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000
69#define __APP_PLL_SCLK_RESET_TIMER_SH 17
70#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
71#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000
72#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000
73#define __APP_PLL_SCLK_CNTLMT0_1_SH 14
74#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
75#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000
76#define __APP_PLL_SCLK_JITLMT0_1_SH 12
77#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
78#define __APP_PLL_SCLK_HREF 0x00000800
79#define __APP_PLL_SCLK_HDIV 0x00000400
80#define __APP_PLL_SCLK_P0_1_MK 0x00000300
81#define __APP_PLL_SCLK_P0_1_SH 8
82#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH)
83#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0
84#define __APP_PLL_SCLK_Z0_2_SH 5
85#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH)
86#define __APP_PLL_SCLK_RSEL200500 0x00000010
87#define __APP_PLL_SCLK_ENARST 0x00000008
88#define __APP_PLL_SCLK_BYPASS 0x00000004
89#define __APP_PLL_SCLK_LRESETN 0x00000002
90#define __APP_PLL_SCLK_ENABLE 0x00000001
91#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */
92#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */
93#define __ENABLE_MAC_1 0x00200000 /* ct */
94#define __ENABLE_MAC_0 0x00100000 /* ct */
95
96#define HOST_SEM0_REG 0x00014230 /* cb/ct */
97#define HOST_SEM1_REG 0x00014234 /* cb/ct */
98#define HOST_SEM2_REG 0x00014238 /* cb/ct */
99#define HOST_SEM3_REG 0x0001423c /* cb/ct */
100#define HOST_SEM4_REG 0x00014610 /* cb/ct */
101#define HOST_SEM5_REG 0x00014614 /* cb/ct */
102#define HOST_SEM6_REG 0x00014618 /* cb/ct */
103#define HOST_SEM7_REG 0x0001461c /* cb/ct */
104#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */
105#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */
106#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */
107#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */
108#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */
109#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */
110#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */
111#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */
112
113#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */
114#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */
115#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */
116#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */
117#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */
118#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */
119#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */
120#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */
121#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */
122#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */
123#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */
124#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */
125#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */
126#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */
127#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */
128#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */
129
130#define PSS_CTL_REG 0x00018800 /* cb/ct */
131#define __PSS_I2C_CLK_DIV_MK 0x007f0000
132#define __PSS_I2C_CLK_DIV_SH 16
133#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
134#define __PSS_LMEM_INIT_DONE 0x00001000
135#define __PSS_LMEM_RESET 0x00000200
136#define __PSS_LMEM_INIT_EN 0x00000100
137#define __PSS_LPU1_RESET 0x00000002
138#define __PSS_LPU0_RESET 0x00000001
139#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */
140#define ERR_SET_REG 0x00018818 /* cb/ct */
141#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */
142#define __PSS_GPIO_OUT_REG 0x00000fff
143#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */
144#define __PSS_GPIO_OE_REG 0x000000ff
145
146#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */
147#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */
148#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */
149#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */
150#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */
151#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */
152#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */
153#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */
154
155#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */
156#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */
157#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */
158#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */
159
160#define MBIST_CTL_REG 0x00014220 /* ct */
161#define __EDRAM_BISTR_START 0x00000004
162#define MBIST_STAT_REG 0x00014224 /* ct */
163#define ETH_MAC_SER_REG 0x00014288 /* ct */
164#define __APP_EMS_CKBUFAMPIN 0x00000020
165#define __APP_EMS_REFCLKSEL 0x00000010
166#define __APP_EMS_CMLCKSEL 0x00000008
167#define __APP_EMS_REFCKBUFEN2 0x00000004
168#define __APP_EMS_REFCKBUFEN1 0x00000002
169#define __APP_EMS_CHANNEL_SEL 0x00000001
170#define FNC_PERS_REG 0x00014604 /* ct */
171#define __F3_FUNCTION_ACTIVE 0x80000000
172#define __F3_FUNCTION_MODE 0x40000000
173#define __F3_PORT_MAP_MK 0x30000000
174#define __F3_PORT_MAP_SH 28
175#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
176#define __F3_VM_MODE 0x08000000
177#define __F3_INTX_STATUS_MK 0x07000000
178#define __F3_INTX_STATUS_SH 24
179#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
180#define __F2_FUNCTION_ACTIVE 0x00800000
181#define __F2_FUNCTION_MODE 0x00400000
182#define __F2_PORT_MAP_MK 0x00300000
183#define __F2_PORT_MAP_SH 20
184#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
185#define __F2_VM_MODE 0x00080000
186#define __F2_INTX_STATUS_MK 0x00070000
187#define __F2_INTX_STATUS_SH 16
188#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
189#define __F1_FUNCTION_ACTIVE 0x00008000
190#define __F1_FUNCTION_MODE 0x00004000
191#define __F1_PORT_MAP_MK 0x00003000
192#define __F1_PORT_MAP_SH 12
193#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
194#define __F1_VM_MODE 0x00000800
195#define __F1_INTX_STATUS_MK 0x00000700
196#define __F1_INTX_STATUS_SH 8
197#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
198#define __F0_FUNCTION_ACTIVE 0x00000080
199#define __F0_FUNCTION_MODE 0x00000040
200#define __F0_PORT_MAP_MK 0x00000030
201#define __F0_PORT_MAP_SH 4
202#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
203#define __F0_VM_MODE 0x00000008
204#define __F0_INTX_STATUS 0x00000007
205enum {
206 __F0_INTX_STATUS_MSIX = 0x0,
207 __F0_INTX_STATUS_INTA = 0x1,
208 __F0_INTX_STATUS_INTB = 0x2,
209 __F0_INTX_STATUS_INTC = 0x3,
210 __F0_INTX_STATUS_INTD = 0x4,
211};
212
213#define OP_MODE 0x0001460c
214#define __APP_ETH_CLK_LOWSPEED 0x00000004
215#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
216#define __GLOBAL_FCOE_MODE 0x00000001
217#define FW_INIT_HALT_P0 0x000191ac
218#define __FW_INIT_HALT_P 0x00000001
219#define FW_INIT_HALT_P1 0x000191bc
220#define PMM_1T_RESET_REG_P0 0x0002381c
221#define __PMM_1T_RESET_P 0x00000001
222#define PMM_1T_RESET_REG_P1 0x00023c1c
223
224/**
225 * Brocade 1860 Adapter specific defines
226 */
227#define CT2_PCI_CPQ_BASE 0x00030000
228#define CT2_PCI_APP_BASE 0x00030100
229#define CT2_PCI_ETH_BASE 0x00030400
230
231/*
232 * APP block registers
233 */
234#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00)
235#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04)
236#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08)
237#define __PME_STATUS_ 0x00200000
238#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000
239#define __PF_VF_BAR_SIZE_MODE__SH 19
240#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
241#define __FC_LL_PORT_MAP__MK 0x00060000
242#define __FC_LL_PORT_MAP__SH 17
243#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH)
244#define __PF_VF_ACTIVE_ 0x00010000
245#define __PF_VF_CFG_RDY_ 0x00008000
246#define __PF_VF_ENABLE_ 0x00004000
247#define __PF_DRIVER_ACTIVE_ 0x00002000
248#define __PF_PME_SEND_ENABLE_ 0x00001000
249#define __PF_EXROM_OFFSET__MK 0x00000ff0
250#define __PF_EXROM_OFFSET__SH 4
251#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH)
252#define __FC_LL_MODE_ 0x00000008
253#define __PF_INTX_PIN_ 0x00000007
254#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C)
255#define __PF_NUM_QUEUES1__MK 0xff000000
256#define __PF_NUM_QUEUES1__SH 24
257#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH)
258#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000
259#define __PF_VF_QUE_OFFSET1__SH 16
260#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH)
261#define __PF_VF_NUM_QUEUES__MK 0x0000ff00
262#define __PF_VF_NUM_QUEUES__SH 8
263#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH)
264#define __PF_VF_QUE_OFFSET_ 0x000000ff
265#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18)
266#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38)
267
268/*
269 * Brocade 1860 adapter CPQ block registers
270 */
271#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00)
272#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20)
273#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40)
274#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60)
275#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80)
276#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84)
277#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88)
278#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c)
279#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90)
280#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94)
281#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98)
282#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C)
283#define CT2_HOST_SEM0_REG 0x000148f0
284#define CT2_HOST_SEM1_REG 0x000148f4
285#define CT2_HOST_SEM2_REG 0x000148f8
286#define CT2_HOST_SEM3_REG 0x000148fc
287#define CT2_HOST_SEM4_REG 0x00014900
288#define CT2_HOST_SEM5_REG 0x00014904
289#define CT2_HOST_SEM6_REG 0x00014908
290#define CT2_HOST_SEM7_REG 0x0001490c
291#define CT2_HOST_SEM0_INFO_REG 0x000148b0
292#define CT2_HOST_SEM1_INFO_REG 0x000148b4
293#define CT2_HOST_SEM2_INFO_REG 0x000148b8
294#define CT2_HOST_SEM3_INFO_REG 0x000148bc
295#define CT2_HOST_SEM4_INFO_REG 0x000148c0
296#define CT2_HOST_SEM5_INFO_REG 0x000148c4
297#define CT2_HOST_SEM6_INFO_REG 0x000148c8
298#define CT2_HOST_SEM7_INFO_REG 0x000148cc
299
300#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808
301#define __APP_LPUCLK_HALFSPEED 0x40000000
302#define __APP_PLL_LCLK_LOAD 0x20000000
303#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000
304#define __APP_PLL_LCLK_FBCNT_SH 21
305#define __APP_PLL_LCLK_FBCNT(_v)	((_v) << __APP_PLL_LCLK_FBCNT_SH)
306enum {
307 __APP_PLL_LCLK_FBCNT_425_MHZ = 6,
308 __APP_PLL_LCLK_FBCNT_468_MHZ = 4,
309};
310#define __APP_PLL_LCLK_EXTFB 0x00000800
311#define __APP_PLL_LCLK_ENOUTS 0x00000400
312#define __APP_PLL_LCLK_RATE 0x00000010
313#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c
314#define __P_SCLK_PLL_LOCK 0x80000000
315#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000
316#define __APP_PLL_SCLK_CLK_DIV2 0x20000000
317#define __APP_PLL_SCLK_LOAD 0x10000000
318#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000
319#define __APP_PLL_SCLK_FBCNT_SH 20
320#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
321enum {
322 __APP_PLL_SCLK_FBCNT_NORM = 6,
323 __APP_PLL_SCLK_FBCNT_10G_FC = 10,
324};
325#define __APP_PLL_SCLK_EXTFB 0x00000800
326#define __APP_PLL_SCLK_ENOUTS 0x00000400
327#define __APP_PLL_SCLK_RATE 0x00000010
328#define CT2_PCIE_MISC_REG 0x00014804
329#define __ETH_CLK_ENABLE_PORT1 0x00000010
330#define CT2_CHIP_MISC_PRG 0x000148a4
331#define __ETH_CLK_ENABLE_PORT0 0x00004000
332#define __APP_LPU_SPEED 0x00000002
333#define CT2_MBIST_STAT_REG 0x00014818
334#define CT2_MBIST_CTL_REG 0x0001481c
335#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c
336#define __PMM_1T_PNDB_P 0x00000002
337#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
338#define CT2_WGN_STATUS 0x00014990
339#define __A2T_AHB_LOAD 0x00000800
340#define __WGN_READY 0x00000400
341#define __GLBL_PF_VF_CFG_RDY 0x00000200
342#define CT2_NFC_CSR_SET_REG 0x00027424
343#define __HALT_NFC_CONTROLLER 0x00000002
344#define __NFC_CONTROLLER_HALTED 0x00001000
345
346#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
347#define __CSI_MAC_RESET 0x00000010
348#define __CSI_MAC_AHB_RESET 0x00000008
349#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4
350#define CT2_CSI_MAC_CONTROL_REG(__n) \
351 (CT2_CSI_MAC0_CONTROL_REG + \
352 (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
353
354/*
355 * Name semaphore registers based on usage
356 */
357#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
358#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
359#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
360#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
361#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
362#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
363
364/*
365 * CT2 semaphore register locations changed
366 */
367#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG
368#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG
369#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG
370#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG
371#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG
372#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG
373
374#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
375#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
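/*
 * Editorial note (not part of the original source): the queue number packs
 * four queues per PCI function, e.g. CPE_Q_NUM(1, 2) = (1 << 2) + 2 = 6,
 * which corresponds to the __HFN_INT_CPE_Q6 bit in __HFN_INT_FN1_MASK below.
 */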
376
377/*
378 * And corresponding host interrupt status bit field defines
379 */
380#define __HFN_INT_CPE_Q0 0x00000001U
381#define __HFN_INT_CPE_Q1 0x00000002U
382#define __HFN_INT_CPE_Q2 0x00000004U
383#define __HFN_INT_CPE_Q3 0x00000008U
384#define __HFN_INT_CPE_Q4 0x00000010U
385#define __HFN_INT_CPE_Q5 0x00000020U
386#define __HFN_INT_CPE_Q6 0x00000040U
387#define __HFN_INT_CPE_Q7 0x00000080U
388#define __HFN_INT_RME_Q0 0x00000100U
389#define __HFN_INT_RME_Q1 0x00000200U
390#define __HFN_INT_RME_Q2 0x00000400U
391#define __HFN_INT_RME_Q3 0x00000800U
392#define __HFN_INT_RME_Q4 0x00001000U
393#define __HFN_INT_RME_Q5 0x00002000U
394#define __HFN_INT_RME_Q6 0x00004000U
395#define __HFN_INT_RME_Q7 0x00008000U
396#define __HFN_INT_ERR_EMC 0x00010000U
397#define __HFN_INT_ERR_LPU0 0x00020000U
398#define __HFN_INT_ERR_LPU1 0x00040000U
399#define __HFN_INT_ERR_PSS 0x00080000U
400#define __HFN_INT_MBOX_LPU0 0x00100000U
401#define __HFN_INT_MBOX_LPU1 0x00200000U
402#define __HFN_INT_MBOX1_LPU0 0x00400000U
403#define __HFN_INT_MBOX1_LPU1 0x00800000U
404#define __HFN_INT_LL_HALT 0x01000000U
405#define __HFN_INT_CPE_MASK 0x000000ffU
406#define __HFN_INT_RME_MASK 0x0000ff00U
407#define __HFN_INT_ERR_MASK \
408 (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
409 __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
410#define __HFN_INT_FN0_MASK \
411 (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
412 __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
413 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
414#define __HFN_INT_FN1_MASK \
415 (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
416 __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
417 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
418
419/*
420 * Host interrupt status defines for 1860
421 */
422#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U
423#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U
424#define __HFN_INT_ERR_PSS_CT2 0x00040000U
425#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
426#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
427#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
428#define __HFN_INT_ERR_WGN_CT2 0x00400000U
429#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U
430#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U
431#define __HFN_INT_ERR_MASK_CT2 \
432 (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
433 __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
434 __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
435 __HFN_INT_ERR_LEHTX_CT2)
436#define __HFN_INT_FN0_MASK_CT2 \
437 (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
438 __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
439 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
440#define __HFN_INT_FN1_MASK_CT2 \
441 (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
442 __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
443 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
444
445/*
446 * asic memory map.
447 */
448#define PSS_SMEM_PAGE_START 0x8000
449#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
450#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
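/*
 * Editorial example (not part of the original source): shared memory is
 * windowed in 0x8000-byte (32 KB) pages, so an smem offset of 0x12345 maps
 * to page PSS_SMEM_PGNUM(_pg0, 0x12345) = _pg0 + 2 at offset
 * PSS_SMEM_PGOFF(0x12345) = 0x2345.
 */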
451
452#endif /* __BFI_REG_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
new file mode 100644
index 000000000000..21e9155d6e56
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -0,0 +1,548 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#ifndef __BNA_H__
14#define __BNA_H__
15
16#include "bfa_cs.h"
17#include "bfa_ioc.h"
18#include "cna.h"
19#include "bfi_ll.h"
20#include "bna_types.h"
21
22extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
23
24/**
25 *
26 * Macros and constants
27 *
28 */
29
30#define BNA_IOC_TIMER_FREQ 200
31
32/* Log string size */
33#define BNA_MESSAGE_SIZE 256
34
35/* MBOX API for PORT, TX, RX */
36#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
37do { \
38 memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len)); \
39 (_qe)->cbfn = (_cbfn); \
40 (_qe)->cbarg = (_cbarg); \
41} while (0)
42
43#define bna_is_small_rxq(rcb) ((rcb)->id == 1)
44
45#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
46 (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
47
48#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
49
50#define BNA_TO_POWER_OF_2(x) \
51do { \
52 int _shift = 0; \
53 while ((x) && (x) != 1) { \
54 (x) >>= 1; \
55 _shift++; \
56 } \
57 (x) <<= _shift; \
58} while (0)
59
60#define BNA_TO_POWER_OF_2_HIGH(x) \
61do { \
62 int n = 1; \
63 while (n < (x)) \
64 n <<= 1; \
65 (x) = n; \
66} while (0)
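/*
 * Editorial example (not part of the original source): BNA_TO_POWER_OF_2
 * rounds down (x = 600 becomes 512) while BNA_TO_POWER_OF_2_HIGH rounds up
 * (x = 600 becomes 1024); both leave an exact power of two unchanged.
 */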
67
68/*
69 * input : _addr-> os dma addr in host endian format,
70 * output : _bna_dma_addr-> pointer to hw dma addr
71 */
72#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
73do { \
74 u64 tmp_addr = \
75 cpu_to_be64((u64)(_addr)); \
76 (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
77 (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
78} while (0)
79
80/*
81 * input : _bna_dma_addr-> pointer to hw dma addr
82 * output : _addr-> os dma addr in host endian format
83 */
84#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
85do { \
86 (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
87 | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
88} while (0)
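/*
 * Editorial sketch (not part of the original source; hw_addr, dma and check
 * are hypothetical locals): round-tripping a coherent DMA address through
 * the hardware representation:
 *
 *	struct bna_dma_addr hw_addr;
 *	dma_addr_t dma, check;
 *
 *	BNA_SET_DMA_ADDR(dma, &hw_addr);	// msb/lsb hold the address big-endian
 *	BNA_GET_DMA_ADDR(&hw_addr, check);	// check equals dma again
 */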
89
90#define containing_rec(addr, type, field) \
91 ((type *)((unsigned char *)(addr) - \
92 (unsigned char *)(&((type *)0)->field)))
93
94#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
95
96/* TxQ element is 64 bytes */
97#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
98#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
99
100#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
101{ \
102 unsigned int page_index; /* index within a page */ \
103 void *page_addr; \
104 page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
105 (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
106 page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
107 (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
108}
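/*
 * Editorial example (not part of the original source), assuming 4 KB pages:
 * a TxQ page then holds PAGE_SIZE >> 6 = 64 entries, so _qe_idx = 70 picks
 * page_addr = _qpt_ptr[1], page_index = 6 and _qe_ptr_range = 58 entries
 * remaining on that page.
 */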
109
110/* RxQ element is 8 bytes */
111#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
112#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
113
114#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
115{ \
116 unsigned int page_index; /* index within a page */ \
117 void *page_addr; \
118 page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
119 (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
120 page_addr = (_qpt_ptr)[((_qe_idx) >> \
121 BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
122 (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
123}
124
125/* CQ element is 16 bytes */
126#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
127#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
128
129#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
130{ \
131 unsigned int page_index; /* index within a page */ \
132 void *page_addr; \
133 \
134 page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
135 (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
136 page_addr = (_qpt_ptr)[((_qe_idx) >> \
137 BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
138 (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
139}
140
141#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
142 (&((_cast *)(_q_base))[(_qe_idx)])
143
144#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
145
146#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
147 ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
148
149#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
150 (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
151
152#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
153 (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
154 ((_q_depth) - 1))
155
156#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
157 ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
158 (_q_depth - 1))
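/*
 * Editorial example (not part of the original source): with _q_depth = 64,
 * producer_index = 5 and consumer_index = 60, BNA_QE_IN_USE_CNT evaluates to
 * (5 - 60) & 63 = 9 and BNA_QE_FREE_CNT to (60 - 5 - 1) & 63 = 54; the two
 * always sum to _q_depth - 1 because one slot is kept empty to tell a full
 * queue from an empty one.
 */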
159
160#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
161
162#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
163
164#define BNA_Q_PI_ADD(_q_ptr, _num) \
165 (_q_ptr)->q.producer_index = \
166 (((_q_ptr)->q.producer_index + (_num)) & \
167 ((_q_ptr)->q.q_depth - 1))
168
169#define BNA_Q_CI_ADD(_q_ptr, _num) \
170 (_q_ptr)->q.consumer_index = \
171 (((_q_ptr)->q.consumer_index + (_num)) \
172 & ((_q_ptr)->q.q_depth - 1))
173
174#define BNA_Q_FREE_COUNT(_q_ptr) \
175 (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
176
177#define BNA_Q_IN_USE_COUNT(_q_ptr) \
178 (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
179
180/* These macros build the data portion of the TxQ/RxQ doorbell */
181#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
182#define BNA_DOORBELL_Q_STOP (0x40000000)
183
184/* These macros build the data portion of the IB doorbell */
185#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
186 (0x80000000 | ((_timeout) << 16) | (_events))
187#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
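/*
 * Editorial example (not part of the original source):
 * BNA_DOORBELL_IB_INT_ACK(5, 2) = 0x80000000 | (5 << 16) | 2 = 0x80050002,
 * i.e. acknowledge 2 events and re-arm the coalescing timer at 5 (timer
 * units are device defined).
 */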
188
189/* Set the coalescing timer for the given ib */
190#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
191 ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0));
192
193/* Acks 'events' # of events for a given ib */
194#define bna_ib_ack(_i_dbell, _events) \
195 (writel(((_i_dbell)->doorbell_ack | (_events)), \
196 (_i_dbell)->doorbell_addr));
197
198#define bna_txq_prod_indx_doorbell(_tcb) \
199 (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
200 (_tcb)->q_dbell));
201
202#define bna_rxq_prod_indx_doorbell(_rcb) \
203 (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
204 (_rcb)->q_dbell));
205
206#define BNA_LARGE_PKT_SIZE 1000
207
208#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
209do { \
210 if ((_len) > BNA_LARGE_PKT_SIZE) { \
211 (_pkt)->large_pkt_cnt++; \
212 } else { \
213 (_pkt)->small_pkt_cnt++; \
214 } \
215} while (0)
216
217#define call_rxf_stop_cbfn(rxf, status) \
218 if ((rxf)->stop_cbfn) { \
219 (*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status)); \
220 (rxf)->stop_cbfn = NULL; \
221 (rxf)->stop_cbarg = NULL; \
222 }
223
224#define call_rxf_start_cbfn(rxf, status) \
225 if ((rxf)->start_cbfn) { \
226 (*(rxf)->start_cbfn)((rxf)->start_cbarg, (status)); \
227 (rxf)->start_cbfn = NULL; \
228 (rxf)->start_cbarg = NULL; \
229 }
230
231#define call_rxf_cam_fltr_cbfn(rxf, status) \
232 if ((rxf)->cam_fltr_cbfn) { \
233 (*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx, \
234 (status)); \
235 (rxf)->cam_fltr_cbfn = NULL; \
236 (rxf)->cam_fltr_cbarg = NULL; \
237 }
238
239#define call_rxf_pause_cbfn(rxf, status) \
240 if ((rxf)->oper_state_cbfn) { \
241 (*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
242 (status)); \
243 (rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED; \
244 (rxf)->oper_state_cbfn = NULL; \
245 (rxf)->oper_state_cbarg = NULL; \
246 }
247
248#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
249
250#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
251
252#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
253
254#define xxx_enable(mode, bitmask, xxx) \
255do { \
256 bitmask |= xxx; \
257 mode |= xxx; \
258} while (0)
259
260#define xxx_disable(mode, bitmask, xxx) \
261do { \
262 bitmask |= xxx; \
263 mode &= ~xxx; \
264} while (0)
265
266#define xxx_inactive(mode, bitmask, xxx) \
267do { \
268 bitmask &= ~xxx; \
269 mode &= ~xxx; \
270} while (0)
271
272#define is_promisc_enable(mode, bitmask) \
273 is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
274
275#define is_promisc_disable(mode, bitmask) \
276 is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
277
278#define promisc_enable(mode, bitmask) \
279 xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
280
281#define promisc_disable(mode, bitmask) \
282 xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
283
284#define promisc_inactive(mode, bitmask) \
285 xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
286
287#define is_default_enable(mode, bitmask) \
288 is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
289
290#define is_default_disable(mode, bitmask) \
291 is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
292
293#define default_enable(mode, bitmask) \
294 xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
295
296#define default_disable(mode, bitmask) \
297 xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
298
299#define default_inactive(mode, bitmask) \
300 xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
301
302#define is_allmulti_enable(mode, bitmask) \
303 is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
304
305#define is_allmulti_disable(mode, bitmask) \
306 is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
307
308#define allmulti_enable(mode, bitmask) \
309 xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
310
311#define allmulti_disable(mode, bitmask) \
312 xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
313
314#define allmulti_inactive(mode, bitmask) \
315 xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
316
317#define GET_RXQS(rxp, q0, q1) do { \
318 switch ((rxp)->type) { \
319 case BNA_RXP_SINGLE: \
320 (q0) = rxp->rxq.single.only; \
321 (q1) = NULL; \
322 break; \
323 case BNA_RXP_SLR: \
324 (q0) = rxp->rxq.slr.large; \
325 (q1) = rxp->rxq.slr.small; \
326 break; \
327 case BNA_RXP_HDS: \
328 (q0) = rxp->rxq.hds.data; \
329 (q1) = rxp->rxq.hds.hdr; \
330 break; \
331 } \
332} while (0)
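/*
 * Editorial note (not part of the original source): q0 always receives the
 * primary RxQ (only/large/data) and q1 the companion RxQ (small/header),
 * or NULL for BNA_RXP_SINGLE.
 */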
333
334/**
335 *
336 * Function prototypes
337 *
338 */
339
340/**
341 * BNA
342 */
343
344/* APIs for BNAD */
345void bna_res_req(struct bna_res_info *res_info);
346void bna_init(struct bna *bna, struct bnad *bnad,
347 struct bfa_pcidev *pcidev,
348 struct bna_res_info *res_info);
349void bna_uninit(struct bna *bna);
350void bna_stats_get(struct bna *bna);
351void bna_get_perm_mac(struct bna *bna, u8 *mac);
352
353/* APIs for Rx */
354int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
355
356/* APIs for RxF */
357struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
358void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
359 struct bna_mac *mac);
360struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
361void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
362 struct bna_mac *mac);
363struct bna_rit_segment *
364bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
365void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
366 struct bna_rit_segment *seg);
367
368/**
369 * DEVICE
370 */
371
372/* APIs for BNAD */
373void bna_device_enable(struct bna_device *device);
374void bna_device_disable(struct bna_device *device,
375 enum bna_cleanup_type type);
376
377/**
378 * MBOX
379 */
380
381/* APIs for PORT, TX, RX */
382void bna_mbox_handler(struct bna *bna, u32 intr_status);
383void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
384
385/**
386 * PORT
387 */
388
389/* API for RX */
390int bna_port_mtu_get(struct bna_port *port);
391void bna_llport_rx_started(struct bna_llport *llport);
392void bna_llport_rx_stopped(struct bna_llport *llport);
393
394/* API for BNAD */
395void bna_port_enable(struct bna_port *port);
396void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
397 void (*cbfn)(void *, enum bna_cb_status));
398void bna_port_pause_config(struct bna_port *port,
399 struct bna_pause_config *pause_config,
400 void (*cbfn)(struct bnad *, enum bna_cb_status));
401void bna_port_mtu_set(struct bna_port *port, int mtu,
402 void (*cbfn)(struct bnad *, enum bna_cb_status));
403void bna_port_mac_get(struct bna_port *port, mac_t *mac);
404
405/* Callbacks for TX, RX */
406void bna_port_cb_tx_stopped(struct bna_port *port,
407 enum bna_cb_status status);
408void bna_port_cb_rx_stopped(struct bna_port *port,
409 enum bna_cb_status status);
410
411/**
412 * IB
413 */
414
415/* APIs for BNA */
416void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
417 struct bna_res_info *res_info);
418void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
419
420/**
421 * TX MODULE AND TX
422 */
423
424/* APIs for BNA */
425void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
426 struct bna_res_info *res_info);
427void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
428int bna_tx_state_get(struct bna_tx *tx);
429
430/* APIs for PORT */
431void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
432void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
433void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
434void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
435void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);
436
437/* APIs for BNAD */
438void bna_tx_res_req(int num_txq, int txq_depth,
439 struct bna_res_info *res_info);
440struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
441 struct bna_tx_config *tx_cfg,
442 struct bna_tx_event_cbfn *tx_cbfn,
443 struct bna_res_info *res_info, void *priv);
444void bna_tx_destroy(struct bna_tx *tx);
445void bna_tx_enable(struct bna_tx *tx);
446void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
447 void (*cbfn)(void *, struct bna_tx *,
448 enum bna_cb_status));
449void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
450
451/**
452 * RX MODULE, RX, RXF
453 */
454
455/* Internal APIs */
456void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
457void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
458 const struct bna_mac *mac_addr);
459void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
460void bna_rxf_adv_init(struct bna_rxf *rxf,
461 struct bna_rx *rx,
462 struct bna_rx_config *q_config);
463int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
464int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
465int rxf_process_packet_filter_default(struct bna_rxf *rxf);
466int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
467int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
468int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
469int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
470int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
471void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
472void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
473void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
474void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
475
476/* APIs for BNA */
477void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
478 struct bna_res_info *res_info);
479void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
480int bna_rx_state_get(struct bna_rx *rx);
481int bna_rxf_state_get(struct bna_rxf *rxf);
482
483/* APIs for PORT */
484void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
485void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
486void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
487
488/* APIs for BNAD */
489void bna_rx_res_req(struct bna_rx_config *rx_config,
490 struct bna_res_info *res_info);
491struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
492 struct bna_rx_config *rx_cfg,
493 struct bna_rx_event_cbfn *rx_cbfn,
494 struct bna_res_info *res_info, void *priv);
495void bna_rx_destroy(struct bna_rx *rx);
496void bna_rx_enable(struct bna_rx *rx);
497void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
498 void (*cbfn)(void *, struct bna_rx *,
499 enum bna_cb_status));
500void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
501void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
502void bna_rx_dim_update(struct bna_ccb *ccb);
503enum bna_cb_status
504bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
505 void (*cbfn)(struct bnad *, struct bna_rx *,
506 enum bna_cb_status));
507enum bna_cb_status
508bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
509 void (*cbfn)(struct bnad *, struct bna_rx *,
510 enum bna_cb_status));
511enum bna_cb_status
512bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
513 void (*cbfn)(struct bnad *, struct bna_rx *,
514 enum bna_cb_status));
515enum bna_cb_status
516bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
517 enum bna_rxmode bitmask,
518 void (*cbfn)(struct bnad *, struct bna_rx *,
519 enum bna_cb_status));
520void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
521void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
522void bna_rx_vlanfilter_enable(struct bna_rx *rx);
523void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
524 void (*cbfn)(struct bnad *, struct bna_rx *,
525 enum bna_cb_status));
526void bna_rx_hds_disable(struct bna_rx *rx,
527 void (*cbfn)(struct bnad *, struct bna_rx *,
528 enum bna_cb_status));
529
530/**
531 * BNAD
532 */
533
534/* Callbacks for BNA */
535void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
536 struct bna_stats *stats);
537
538/* Callbacks for DEVICE */
539void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
540void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
541void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
542void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);
543
544/* Callbacks for port */
545void bnad_cb_port_link_status(struct bnad *bnad,
546 enum bna_link_status status);
547
548#endif /* __BNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_ctrl.c b/drivers/net/ethernet/brocade/bna/bna_ctrl.c
new file mode 100644
index 000000000000..cb2594c564dc
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_ctrl.c
@@ -0,0 +1,3076 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include "bna.h"
19#include "bfa_cs.h"
20
21static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
22
23static void
24bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
25 int status)
26{
27 int i;
28 u8 prio_map;
29
30 port->llport.link_status = BNA_LINK_UP;
31 if (aen->cee_linkup)
32 port->llport.link_status = BNA_CEE_UP;
33
34 /* Compute the priority */
35 prio_map = aen->prio_map;
36 if (prio_map) {
37 for (i = 0; i < 8; i++) {
38 if ((prio_map >> i) & 0x1)
39 break;
40 }
41 port->priority = i;
42 } else
43 port->priority = 0;
44
45 /* Dispatch events */
46 bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
47 bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
48 port->link_cbfn(port->bna->bnad, port->llport.link_status);
49}
50
51static void
52bna_port_cb_link_down(struct bna_port *port, int status)
53{
54 port->llport.link_status = BNA_LINK_DOWN;
55
56 /* Dispatch events */
57 bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
58 port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
59}
60
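/*
 * Editorial note (not part of the original source): a regular port may come
 * up once it is administratively up, its Rx path has started and the
 * physical port is enabled; for non-regular (loopback) port types the
 * physical port must instead be disabled.
 */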
61static inline int
62llport_can_be_up(struct bna_llport *llport)
63{
64 int ready = 0;
65 if (llport->type == BNA_PORT_T_REGULAR)
66 ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
67 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
68 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
69 else
70 ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
71 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
72 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
73 return ready;
74}
75
76#define llport_is_up llport_can_be_up
77
78enum bna_llport_event {
79 LLPORT_E_START = 1,
80 LLPORT_E_STOP = 2,
81 LLPORT_E_FAIL = 3,
82 LLPORT_E_UP = 4,
83 LLPORT_E_DOWN = 5,
84 LLPORT_E_FWRESP_UP_OK = 6,
85 LLPORT_E_FWRESP_UP_FAIL = 7,
86 LLPORT_E_FWRESP_DOWN = 8
87};
88
89static void
90bna_llport_cb_port_enabled(struct bna_llport *llport)
91{
92 llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
93
94 if (llport_can_be_up(llport))
95 bfa_fsm_send_event(llport, LLPORT_E_UP);
96}
97
98static void
99bna_llport_cb_port_disabled(struct bna_llport *llport)
100{
101 int llport_up = llport_is_up(llport);
102
103 llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
104
105 if (llport_up)
106 bfa_fsm_send_event(llport, LLPORT_E_DOWN);
107}
108
109/**
110 * MBOX
111 */
112static int
113bna_is_aen(u8 msg_id)
114{
115 switch (msg_id) {
116 case BFI_LL_I2H_LINK_DOWN_AEN:
117 case BFI_LL_I2H_LINK_UP_AEN:
118 case BFI_LL_I2H_PORT_ENABLE_AEN:
119 case BFI_LL_I2H_PORT_DISABLE_AEN:
120 return 1;
121
122 default:
123 return 0;
124 }
125}
126
127static void
128bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
129{
130 struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);
131
132 switch (aen->mh.msg_id) {
133 case BFI_LL_I2H_LINK_UP_AEN:
134 bna_port_cb_link_up(&bna->port, aen, aen->reason);
135 break;
136 case BFI_LL_I2H_LINK_DOWN_AEN:
137 bna_port_cb_link_down(&bna->port, aen->reason);
138 break;
139 case BFI_LL_I2H_PORT_ENABLE_AEN:
140 bna_llport_cb_port_enabled(&bna->port.llport);
141 break;
142 case BFI_LL_I2H_PORT_DISABLE_AEN:
143 bna_llport_cb_port_disabled(&bna->port.llport);
144 break;
145 default:
146 break;
147 }
148}
149
150static void
151bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
152{
153 struct bna *bna = (struct bna *)(llarg);
154 struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
155 struct bfi_mhdr *cmd_h, *rsp_h;
156 struct bna_mbox_qe *mb_qe = NULL;
157 int to_post = 0;
158 u8 aen = 0;
159 char message[BNA_MESSAGE_SIZE];
160
161 aen = bna_is_aen(mb_rsp->mh.msg_id);
162
163 if (!aen) {
164 mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
165 cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
166 rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
167
168 if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
169 (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
170 /* Remove the request from posted_q, update state */
171 list_del(&mb_qe->qe);
172 bna->mbox_mod.msg_pending--;
173 if (list_empty(&bna->mbox_mod.posted_q))
174 bna->mbox_mod.state = BNA_MBOX_FREE;
175 else
176 to_post = 1;
177
178 /* Dispatch the cbfn */
179 if (mb_qe->cbfn)
180 mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
181
182 /* Post the next entry, if needed */
183 if (to_post) {
184 mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
185 bfa_nw_ioc_mbox_queue(&bna->device.ioc,
186 &mb_qe->cmd);
187 }
188 } else {
189 snprintf(message, BNA_MESSAGE_SIZE,
190 "No matching rsp for [%d:%d:%d]\n",
191 mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
192 mb_rsp->mh.mtag.i2htok);
193 pr_info("%s", message);
194 }
195
196 } else
197 bna_mbox_aen_callback(bna, msg);
198}
199
200static void
201bna_err_handler(struct bna *bna, u32 intr_status)
202{
203 u32 init_halt;
204
205 if (intr_status & __HALT_STATUS_BITS) {
206 init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
207 init_halt &= ~__FW_INIT_HALT_P;
208 writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
209 }
210
211 bfa_nw_ioc_error_isr(&bna->device.ioc);
212}
213
214void
215bna_mbox_handler(struct bna *bna, u32 intr_status)
216{
217 if (BNA_IS_ERR_INTR(intr_status)) {
218 bna_err_handler(bna, intr_status);
219 return;
220 }
221 if (BNA_IS_MBOX_INTR(intr_status))
222 bfa_nw_ioc_mbox_isr(&bna->device.ioc);
223}
224
225void
226bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
227{
228 struct bfi_mhdr *mh;
229
230 mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
231
232 mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
233 bna->mbox_mod.msg_ctr++;
234 bna->mbox_mod.msg_pending++;
235 if (bna->mbox_mod.state == BNA_MBOX_FREE) {
236 list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
237 bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
238 bna->mbox_mod.state = BNA_MBOX_POSTED;
239 } else {
240 list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
241 }
242}
243
244static void
245bna_mbox_flush_q(struct bna *bna, struct list_head *q)
246{
247 struct bna_mbox_qe *mb_qe = NULL;
248 struct list_head *mb_q;
249 void (*cbfn)(void *arg, int status);
250 void *cbarg;
251
252 mb_q = &bna->mbox_mod.posted_q;
253
254 while (!list_empty(mb_q)) {
255 bfa_q_deq(mb_q, &mb_qe);
256 cbfn = mb_qe->cbfn;
257 cbarg = mb_qe->cbarg;
258 bfa_q_qe_init(mb_qe);
259 bna->mbox_mod.msg_pending--;
260
261 if (cbfn)
262 cbfn(cbarg, BNA_CB_NOT_EXEC);
263 }
264
265 bna->mbox_mod.state = BNA_MBOX_FREE;
266}
267
268static void
269bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
270{
271}
272
273static void
274bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
275{
276 bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
277}
278
279static void
280bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
281{
282 bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
283 mbox_mod->state = BNA_MBOX_FREE;
284 mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
285 INIT_LIST_HEAD(&mbox_mod->posted_q);
286 mbox_mod->bna = bna;
287}
288
289static void
290bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
291{
292 mbox_mod->bna = NULL;
293}
294
295/**
296 * LLPORT
297 */
298#define call_llport_stop_cbfn(llport, status)\
299do {\
300 if ((llport)->stop_cbfn)\
301 (llport)->stop_cbfn(&(llport)->bna->port, status);\
302 (llport)->stop_cbfn = NULL;\
303} while (0)
304
305static void bna_fw_llport_up(struct bna_llport *llport);
306static void bna_fw_cb_llport_up(void *arg, int status);
307static void bna_fw_llport_down(struct bna_llport *llport);
308static void bna_fw_cb_llport_down(void *arg, int status);
309static void bna_llport_start(struct bna_llport *llport);
310static void bna_llport_stop(struct bna_llport *llport);
311static void bna_llport_fail(struct bna_llport *llport);
312
313enum bna_llport_state {
314 BNA_LLPORT_STOPPED = 1,
315 BNA_LLPORT_DOWN = 2,
316 BNA_LLPORT_UP_RESP_WAIT = 3,
317 BNA_LLPORT_DOWN_RESP_WAIT = 4,
318 BNA_LLPORT_UP = 5,
319 BNA_LLPORT_LAST_RESP_WAIT = 6
320};
321
322bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
323 enum bna_llport_event);
324bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
325 enum bna_llport_event);
326bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
327 enum bna_llport_event);
328bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
329 enum bna_llport_event);
330bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
331 enum bna_llport_event);
332bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
333 enum bna_llport_event);
334
335static struct bfa_sm_table llport_sm_table[] = {
336 {BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
337 {BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
338 {BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
339 {BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
340 {BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
341 {BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
342};
343
344static void
345bna_llport_sm_stopped_entry(struct bna_llport *llport)
346{
347 llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
348 call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
349}
350
351static void
352bna_llport_sm_stopped(struct bna_llport *llport,
353 enum bna_llport_event event)
354{
355 switch (event) {
356 case LLPORT_E_START:
357 bfa_fsm_set_state(llport, bna_llport_sm_down);
358 break;
359
360 case LLPORT_E_STOP:
361 call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
362 break;
363
364 case LLPORT_E_FAIL:
365 break;
366
367 case LLPORT_E_DOWN:
368 /* This event is received due to Rx objects failing */
369 /* No-op */
370 break;
371
372 case LLPORT_E_FWRESP_UP_OK:
373 case LLPORT_E_FWRESP_DOWN:
374 /**
375 * These events are received due to flushing of mbox when
376 * device fails
377 */
378 /* No-op */
379 break;
380
381 default:
382 bfa_sm_fault(event);
383 }
384}
385
386static void
387bna_llport_sm_down_entry(struct bna_llport *llport)
388{
389 bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
390}
391
392static void
393bna_llport_sm_down(struct bna_llport *llport,
394 enum bna_llport_event event)
395{
396 switch (event) {
397 case LLPORT_E_STOP:
398 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
399 break;
400
401 case LLPORT_E_FAIL:
402 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
403 break;
404
405 case LLPORT_E_UP:
406 bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
407 bna_fw_llport_up(llport);
408 break;
409
410 default:
411 bfa_sm_fault(event);
412 }
413}
414
415static void
416bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
417{
418 BUG_ON(!llport_can_be_up(llport));
419	/**
420	 * NOTE: Do not call bna_fw_llport_up() here. That would post a second
421	 * mbox request while one is still outstanding, due to the
422	 * down_resp_wait -> up_resp_wait transition on event LLPORT_E_UP
423	 */
424}
425
426static void
427bna_llport_sm_up_resp_wait(struct bna_llport *llport,
428 enum bna_llport_event event)
429{
430 switch (event) {
431 case LLPORT_E_STOP:
432 bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
433 break;
434
435 case LLPORT_E_FAIL:
436 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
437 break;
438
439 case LLPORT_E_DOWN:
440 bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
441 break;
442
443 case LLPORT_E_FWRESP_UP_OK:
444 bfa_fsm_set_state(llport, bna_llport_sm_up);
445 break;
446
447 case LLPORT_E_FWRESP_UP_FAIL:
448 bfa_fsm_set_state(llport, bna_llport_sm_down);
449 break;
450
451 case LLPORT_E_FWRESP_DOWN:
452 /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
453 bna_fw_llport_up(llport);
454 break;
455
456 default:
457 bfa_sm_fault(event);
458 }
459}
460
461static void
462bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
463{
464	/**
465	 * NOTE: Do not call bna_fw_llport_down() here. That would post a second
466	 * mbox request while one is still outstanding, due to the
467	 * up_resp_wait -> down_resp_wait transition on event LLPORT_E_DOWN
468	 */
469}
470
471static void
472bna_llport_sm_down_resp_wait(struct bna_llport *llport,
473 enum bna_llport_event event)
474{
475 switch (event) {
476 case LLPORT_E_STOP:
477 bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
478 break;
479
480 case LLPORT_E_FAIL:
481 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
482 break;
483
484 case LLPORT_E_UP:
485 bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
486 break;
487
488 case LLPORT_E_FWRESP_UP_OK:
489 /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
490 bna_fw_llport_down(llport);
491 break;
492
493 case LLPORT_E_FWRESP_UP_FAIL:
494 case LLPORT_E_FWRESP_DOWN:
495 bfa_fsm_set_state(llport, bna_llport_sm_down);
496 break;
497
498 default:
499 bfa_sm_fault(event);
500 }
501}
502
503static void
504bna_llport_sm_up_entry(struct bna_llport *llport)
505{
506}
507
508static void
509bna_llport_sm_up(struct bna_llport *llport,
510 enum bna_llport_event event)
511{
512 switch (event) {
513 case LLPORT_E_STOP:
514 bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
515 bna_fw_llport_down(llport);
516 break;
517
518 case LLPORT_E_FAIL:
519 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
520 break;
521
522 case LLPORT_E_DOWN:
523 bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
524 bna_fw_llport_down(llport);
525 break;
526
527 default:
528 bfa_sm_fault(event);
529 }
530}
531
532static void
533bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
534{
535}
536
537static void
538bna_llport_sm_last_resp_wait(struct bna_llport *llport,
539 enum bna_llport_event event)
540{
541 switch (event) {
542 case LLPORT_E_FAIL:
543 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
544 break;
545
546 case LLPORT_E_DOWN:
547 /**
548 * This event is received due to Rx objects stopping in
549 * parallel to llport
550 */
551 /* No-op */
552 break;
553
554 case LLPORT_E_FWRESP_UP_OK:
555		/* up_resp_wait->last_resp_wait transition on LLPORT_E_STOP */
556 bna_fw_llport_down(llport);
557 break;
558
559 case LLPORT_E_FWRESP_UP_FAIL:
560 case LLPORT_E_FWRESP_DOWN:
561 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
562 break;
563
564 default:
565 bfa_sm_fault(event);
566 }
567}
568
569static void
570bna_fw_llport_admin_up(struct bna_llport *llport)
571{
572 struct bfi_ll_port_admin_req ll_req;
573
574 memset(&ll_req, 0, sizeof(ll_req));
575 ll_req.mh.msg_class = BFI_MC_LL;
576 ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
577 ll_req.mh.mtag.h2i.lpu_id = 0;
578
579 ll_req.up = BNA_STATUS_T_ENABLED;
580
581 bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
582 bna_fw_cb_llport_up, llport);
583
584 bna_mbox_send(llport->bna, &llport->mbox_qe);
585}
586
587static void
588bna_fw_llport_up(struct bna_llport *llport)
589{
590 if (llport->type == BNA_PORT_T_REGULAR)
591 bna_fw_llport_admin_up(llport);
592}
593
594static void
595bna_fw_cb_llport_up(void *arg, int status)
596{
597 struct bna_llport *llport = (struct bna_llport *)arg;
598
599 bfa_q_qe_init(&llport->mbox_qe.qe);
600 if (status == BFI_LL_CMD_FAIL) {
601 if (llport->type == BNA_PORT_T_REGULAR)
602 llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
603 else
604 llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
605 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
606 } else
607 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
608}
609
610static void
611bna_fw_llport_admin_down(struct bna_llport *llport)
612{
613 struct bfi_ll_port_admin_req ll_req;
614
615 memset(&ll_req, 0, sizeof(ll_req));
616 ll_req.mh.msg_class = BFI_MC_LL;
617 ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
618 ll_req.mh.mtag.h2i.lpu_id = 0;
619
620 ll_req.up = BNA_STATUS_T_DISABLED;
621
622 bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
623 bna_fw_cb_llport_down, llport);
624
625 bna_mbox_send(llport->bna, &llport->mbox_qe);
626}
627
628static void
629bna_fw_llport_down(struct bna_llport *llport)
630{
631 if (llport->type == BNA_PORT_T_REGULAR)
632 bna_fw_llport_admin_down(llport);
633}
634
635static void
636bna_fw_cb_llport_down(void *arg, int status)
637{
638 struct bna_llport *llport = (struct bna_llport *)arg;
639
640 bfa_q_qe_init(&llport->mbox_qe.qe);
641 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
642}
643
644static void
645bna_port_cb_llport_stopped(struct bna_port *port,
646 enum bna_cb_status status)
647{
648 bfa_wc_down(&port->chld_stop_wc);
649}
650
651static void
652bna_llport_init(struct bna_llport *llport, struct bna *bna)
653{
654 llport->flags |= BNA_LLPORT_F_ADMIN_UP;
655 llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
656 llport->type = BNA_PORT_T_REGULAR;
657 llport->bna = bna;
658
659 llport->link_status = BNA_LINK_DOWN;
660
661 llport->rx_started_count = 0;
662
663 llport->stop_cbfn = NULL;
664
665 bfa_q_qe_init(&llport->mbox_qe.qe);
666
667 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
668}
669
670static void
671bna_llport_uninit(struct bna_llport *llport)
672{
673 llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
674 llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
675
676 llport->bna = NULL;
677}
678
679static void
680bna_llport_start(struct bna_llport *llport)
681{
682 bfa_fsm_send_event(llport, LLPORT_E_START);
683}
684
685static void
686bna_llport_stop(struct bna_llport *llport)
687{
688 llport->stop_cbfn = bna_port_cb_llport_stopped;
689
690 bfa_fsm_send_event(llport, LLPORT_E_STOP);
691}
692
693static void
694bna_llport_fail(struct bna_llport *llport)
695{
696 /* Reset the physical port status to enabled */
697 llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
698 bfa_fsm_send_event(llport, LLPORT_E_FAIL);
699}
700
701static int
702bna_llport_state_get(struct bna_llport *llport)
703{
704 return bfa_sm_to_state(llport_sm_table, llport->fsm);
705}
706
707void
708bna_llport_rx_started(struct bna_llport *llport)
709{
710 llport->rx_started_count++;
711
712 if (llport->rx_started_count == 1) {
713
714 llport->flags |= BNA_LLPORT_F_RX_STARTED;
715
716 if (llport_can_be_up(llport))
717 bfa_fsm_send_event(llport, LLPORT_E_UP);
718 }
719}
720
721void
722bna_llport_rx_stopped(struct bna_llport *llport)
723{
724 int llport_up = llport_is_up(llport);
725
726 llport->rx_started_count--;
727
728 if (llport->rx_started_count == 0) {
729
730 llport->flags &= ~BNA_LLPORT_F_RX_STARTED;
731
732 if (llport_up)
733 bfa_fsm_send_event(llport, LLPORT_E_DOWN);
734 }
735}
736
737/**
738 * PORT
739 */
740#define bna_port_chld_start(port)\
741do {\
742 enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
743 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
744 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
745 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
746 bna_llport_start(&(port)->llport);\
747 bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
748 bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
749} while (0)
750
751#define bna_port_chld_stop(port)\
752do {\
753 enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
754 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
755 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
756 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
757 bfa_wc_up(&(port)->chld_stop_wc);\
758 bfa_wc_up(&(port)->chld_stop_wc);\
759 bfa_wc_up(&(port)->chld_stop_wc);\
760 bna_llport_stop(&(port)->llport);\
761 bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
762 bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
763} while (0)
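/*
 * Editorial note (not part of the original source): chld_stop_wc is raised
 * once per child being stopped (llport, Tx module, Rx module); each child's
 * stop completion calls bfa_wc_down() (see bna_port_cb_llport_stopped()),
 * and once the count drops to zero bna_port_cb_chld_stopped() is expected
 * to run and post PORT_E_CHLD_STOPPED to the port FSM.
 */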
764
765#define bna_port_chld_fail(port)\
766do {\
767 bna_llport_fail(&(port)->llport);\
768 bna_tx_mod_fail(&(port)->bna->tx_mod);\
769 bna_rx_mod_fail(&(port)->bna->rx_mod);\
770} while (0)
771
772#define bna_port_rx_start(port)\
773do {\
774 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
775 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
776 bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
777} while (0)
778
779#define bna_port_rx_stop(port)\
780do {\
781 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
782 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
783 bfa_wc_up(&(port)->chld_stop_wc);\
784 bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
785} while (0)
786
787#define call_port_stop_cbfn(port, status)\
788do {\
789 if ((port)->stop_cbfn)\
790 (port)->stop_cbfn((port)->stop_cbarg, status);\
791 (port)->stop_cbfn = NULL;\
792 (port)->stop_cbarg = NULL;\
793} while (0)
794
795#define call_port_pause_cbfn(port, status)\
796do {\
797 if ((port)->pause_cbfn)\
798 (port)->pause_cbfn((port)->bna->bnad, status);\
799 (port)->pause_cbfn = NULL;\
800} while (0)
801
802#define call_port_mtu_cbfn(port, status)\
803do {\
804 if ((port)->mtu_cbfn)\
805 (port)->mtu_cbfn((port)->bna->bnad, status);\
806 (port)->mtu_cbfn = NULL;\
807} while (0)
808
809static void bna_fw_pause_set(struct bna_port *port);
810static void bna_fw_cb_pause_set(void *arg, int status);
811static void bna_fw_mtu_set(struct bna_port *port);
812static void bna_fw_cb_mtu_set(void *arg, int status);
813
814enum bna_port_event {
815 PORT_E_START = 1,
816 PORT_E_STOP = 2,
817 PORT_E_FAIL = 3,
818 PORT_E_PAUSE_CFG = 4,
819 PORT_E_MTU_CFG = 5,
820 PORT_E_CHLD_STOPPED = 6,
821 PORT_E_FWRESP_PAUSE = 7,
822 PORT_E_FWRESP_MTU = 8
823};
824
825enum bna_port_state {
826 BNA_PORT_STOPPED = 1,
827 BNA_PORT_MTU_INIT_WAIT = 2,
828 BNA_PORT_PAUSE_INIT_WAIT = 3,
829 BNA_PORT_LAST_RESP_WAIT = 4,
830 BNA_PORT_STARTED = 5,
831 BNA_PORT_PAUSE_CFG_WAIT = 6,
832 BNA_PORT_RX_STOP_WAIT = 7,
833 BNA_PORT_MTU_CFG_WAIT = 8,
834 BNA_PORT_CHLD_STOP_WAIT = 9
835};
836
837bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
838 enum bna_port_event);
839bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
840 enum bna_port_event);
841bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
842 enum bna_port_event);
843bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
844 enum bna_port_event);
845bfa_fsm_state_decl(bna_port, started, struct bna_port,
846 enum bna_port_event);
847bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
848 enum bna_port_event);
849bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
850 enum bna_port_event);
851bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
852 enum bna_port_event);
853bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
854 enum bna_port_event);
855
856static struct bfa_sm_table port_sm_table[] = {
857 {BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
858 {BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
859 {BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
860 {BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
861 {BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
862 {BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
863 {BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
864 {BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
865 {BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
866};
867
868static void
869bna_port_sm_stopped_entry(struct bna_port *port)
870{
871 call_port_pause_cbfn(port, BNA_CB_SUCCESS);
872 call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
873 call_port_stop_cbfn(port, BNA_CB_SUCCESS);
874}
875
876static void
877bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
878{
879 switch (event) {
880 case PORT_E_START:
881 bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
882 break;
883
884 case PORT_E_STOP:
885 call_port_stop_cbfn(port, BNA_CB_SUCCESS);
886 break;
887
888 case PORT_E_FAIL:
889 /* No-op */
890 break;
891
892 case PORT_E_PAUSE_CFG:
893 call_port_pause_cbfn(port, BNA_CB_SUCCESS);
894 break;
895
896 case PORT_E_MTU_CFG:
897 call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
898 break;
899
900 case PORT_E_CHLD_STOPPED:
901 /**
902 * This event is received due to LLPort, Tx and Rx objects
903 * failing
904 */
905 /* No-op */
906 break;
907
908 case PORT_E_FWRESP_PAUSE:
909 case PORT_E_FWRESP_MTU:
910 /**
911 * These events are received due to flushing of mbox when
912 * device fails
913 */
914 /* No-op */
915 break;
916
917 default:
918 bfa_sm_fault(event);
919 }
920}
921
922static void
923bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
924{
925 bna_fw_mtu_set(port);
926}
927
928static void
929bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
930{
931 switch (event) {
932 case PORT_E_STOP:
933 bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
934 break;
935
936 case PORT_E_FAIL:
937 bfa_fsm_set_state(port, bna_port_sm_stopped);
938 break;
939
940 case PORT_E_PAUSE_CFG:
941 /* No-op */
942 break;
943
944 case PORT_E_MTU_CFG:
945 port->flags |= BNA_PORT_F_MTU_CHANGED;
946 break;
947
948 case PORT_E_FWRESP_MTU:
949 if (port->flags & BNA_PORT_F_MTU_CHANGED) {
950 port->flags &= ~BNA_PORT_F_MTU_CHANGED;
951 bna_fw_mtu_set(port);
952 } else {
953 bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
954 }
955 break;
956
957 default:
958 bfa_sm_fault(event);
959 }
960}
961
962static void
963bna_port_sm_pause_init_wait_entry(struct bna_port *port)
964{
965 bna_fw_pause_set(port);
966}
967
968static void
969bna_port_sm_pause_init_wait(struct bna_port *port,
970 enum bna_port_event event)
971{
972 switch (event) {
973 case PORT_E_STOP:
974 bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
975 break;
976
977 case PORT_E_FAIL:
978 bfa_fsm_set_state(port, bna_port_sm_stopped);
979 break;
980
981 case PORT_E_PAUSE_CFG:
982 port->flags |= BNA_PORT_F_PAUSE_CHANGED;
983 break;
984
985 case PORT_E_MTU_CFG:
986 port->flags |= BNA_PORT_F_MTU_CHANGED;
987 break;
988
989 case PORT_E_FWRESP_PAUSE:
990 if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
991 port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
992 bna_fw_pause_set(port);
993 } else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
994 port->flags &= ~BNA_PORT_F_MTU_CHANGED;
995 bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
996 } else {
997 bfa_fsm_set_state(port, bna_port_sm_started);
998 bna_port_chld_start(port);
999 }
1000 break;
1001
1002 default:
1003 bfa_sm_fault(event);
1004 }
1005}
1006
1007static void
1008bna_port_sm_last_resp_wait_entry(struct bna_port *port)
1009{
1010}
1011
1012static void
1013bna_port_sm_last_resp_wait(struct bna_port *port,
1014 enum bna_port_event event)
1015{
1016 switch (event) {
1017 case PORT_E_FAIL:
1018 case PORT_E_FWRESP_PAUSE:
1019 case PORT_E_FWRESP_MTU:
1020 bfa_fsm_set_state(port, bna_port_sm_stopped);
1021 break;
1022
1023 default:
1024 bfa_sm_fault(event);
1025 }
1026}
1027
1028static void
1029bna_port_sm_started_entry(struct bna_port *port)
1030{
1031 /**
1032 * NOTE: Do not call bna_port_chld_start() here, since it will be
1033 * inadvertently called during pause_cfg_wait->started transition
1034 * as well
1035 */
1036 call_port_pause_cbfn(port, BNA_CB_SUCCESS);
1037 call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
1038}
1039
1040static void
1041bna_port_sm_started(struct bna_port *port,
1042 enum bna_port_event event)
1043{
1044 switch (event) {
1045 case PORT_E_STOP:
1046 bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
1047 break;
1048
1049 case PORT_E_FAIL:
1050 bfa_fsm_set_state(port, bna_port_sm_stopped);
1051 bna_port_chld_fail(port);
1052 break;
1053
1054 case PORT_E_PAUSE_CFG:
1055 bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
1056 break;
1057
1058 case PORT_E_MTU_CFG:
1059 bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
1060 break;
1061
1062 default:
1063 bfa_sm_fault(event);
1064 }
1065}
1066
1067static void
1068bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
1069{
1070 bna_fw_pause_set(port);
1071}
1072
1073static void
1074bna_port_sm_pause_cfg_wait(struct bna_port *port,
1075 enum bna_port_event event)
1076{
1077 switch (event) {
1078 case PORT_E_FAIL:
1079 bfa_fsm_set_state(port, bna_port_sm_stopped);
1080 bna_port_chld_fail(port);
1081 break;
1082
1083 case PORT_E_FWRESP_PAUSE:
1084 bfa_fsm_set_state(port, bna_port_sm_started);
1085 break;
1086
1087 default:
1088 bfa_sm_fault(event);
1089 }
1090}
1091
1092static void
1093bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
1094{
1095 bna_port_rx_stop(port);
1096}
1097
1098static void
1099bna_port_sm_rx_stop_wait(struct bna_port *port,
1100 enum bna_port_event event)
1101{
1102 switch (event) {
1103 case PORT_E_FAIL:
1104 bfa_fsm_set_state(port, bna_port_sm_stopped);
1105 bna_port_chld_fail(port);
1106 break;
1107
1108 case PORT_E_CHLD_STOPPED:
1109 bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
1110 break;
1111
1112 default:
1113 bfa_sm_fault(event);
1114 }
1115}
1116
1117static void
1118bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
1119{
1120 bna_fw_mtu_set(port);
1121}
1122
1123static void
1124bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
1125{
1126 switch (event) {
1127 case PORT_E_FAIL:
1128 bfa_fsm_set_state(port, bna_port_sm_stopped);
1129 bna_port_chld_fail(port);
1130 break;
1131
1132 case PORT_E_FWRESP_MTU:
1133 bfa_fsm_set_state(port, bna_port_sm_started);
1134 bna_port_rx_start(port);
1135 break;
1136
1137 default:
1138 bfa_sm_fault(event);
1139 }
1140}
1141
1142static void
1143bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
1144{
1145 bna_port_chld_stop(port);
1146}
1147
1148static void
1149bna_port_sm_chld_stop_wait(struct bna_port *port,
1150 enum bna_port_event event)
1151{
1152 switch (event) {
1153 case PORT_E_FAIL:
1154 bfa_fsm_set_state(port, bna_port_sm_stopped);
1155 bna_port_chld_fail(port);
1156 break;
1157
1158 case PORT_E_CHLD_STOPPED:
1159 bfa_fsm_set_state(port, bna_port_sm_stopped);
1160 break;
1161
1162 default:
1163 bfa_sm_fault(event);
1164 }
1165}
1166
1167static void
1168bna_fw_pause_set(struct bna_port *port)
1169{
1170 struct bfi_ll_set_pause_req ll_req;
1171
1172 memset(&ll_req, 0, sizeof(ll_req));
1173 ll_req.mh.msg_class = BFI_MC_LL;
1174 ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
1175 ll_req.mh.mtag.h2i.lpu_id = 0;
1176
1177 ll_req.tx_pause = port->pause_config.tx_pause;
1178 ll_req.rx_pause = port->pause_config.rx_pause;
1179
1180 bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
1181 bna_fw_cb_pause_set, port);
1182
1183 bna_mbox_send(port->bna, &port->mbox_qe);
1184}
1185
1186static void
1187bna_fw_cb_pause_set(void *arg, int status)
1188{
1189 struct bna_port *port = (struct bna_port *)arg;
1190
1191 bfa_q_qe_init(&port->mbox_qe.qe);
1192 bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
1193}
1194
1195void
1196bna_fw_mtu_set(struct bna_port *port)
1197{
1198 struct bfi_ll_mtu_info_req ll_req;
1199
1200 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
1201 ll_req.mtu = htons((u16)port->mtu);
1202
1203 bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
1204 bna_fw_cb_mtu_set, port);
1205 bna_mbox_send(port->bna, &port->mbox_qe);
1206}
1207
1208void
1209bna_fw_cb_mtu_set(void *arg, int status)
1210{
1211 struct bna_port *port = (struct bna_port *)arg;
1212
1213 bfa_q_qe_init(&port->mbox_qe.qe);
1214 bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
1215}
1216
1217static void
1218bna_port_cb_chld_stopped(void *arg)
1219{
1220 struct bna_port *port = (struct bna_port *)arg;
1221
1222 bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
1223}
1224
1225static void
1226bna_port_init(struct bna_port *port, struct bna *bna)
1227{
1228 port->bna = bna;
1229 port->flags = 0;
1230 port->mtu = 0;
1231 port->type = BNA_PORT_T_REGULAR;
1232
1233 port->link_cbfn = bnad_cb_port_link_status;
1234
1235 port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
1236 port->chld_stop_wc.wc_cbarg = port;
1237 port->chld_stop_wc.wc_count = 0;
1238
1239 port->stop_cbfn = NULL;
1240 port->stop_cbarg = NULL;
1241
1242 port->pause_cbfn = NULL;
1243
1244 port->mtu_cbfn = NULL;
1245
1246 bfa_q_qe_init(&port->mbox_qe.qe);
1247
1248 bfa_fsm_set_state(port, bna_port_sm_stopped);
1249
1250 bna_llport_init(&port->llport, bna);
1251}
1252
1253static void
1254bna_port_uninit(struct bna_port *port)
1255{
1256 bna_llport_uninit(&port->llport);
1257
1258 port->flags = 0;
1259
1260 port->bna = NULL;
1261}
1262
1263static int
1264bna_port_state_get(struct bna_port *port)
1265{
1266 return bfa_sm_to_state(port_sm_table, port->fsm);
1267}
1268
1269static void
1270bna_port_start(struct bna_port *port)
1271{
1272 port->flags |= BNA_PORT_F_DEVICE_READY;
1273 if (port->flags & BNA_PORT_F_ENABLED)
1274 bfa_fsm_send_event(port, PORT_E_START);
1275}
1276
1277static void
1278bna_port_stop(struct bna_port *port)
1279{
1280 port->stop_cbfn = bna_device_cb_port_stopped;
1281 port->stop_cbarg = &port->bna->device;
1282
1283 port->flags &= ~BNA_PORT_F_DEVICE_READY;
1284 bfa_fsm_send_event(port, PORT_E_STOP);
1285}
1286
1287static void
1288bna_port_fail(struct bna_port *port)
1289{
1290 port->flags &= ~BNA_PORT_F_DEVICE_READY;
1291 bfa_fsm_send_event(port, PORT_E_FAIL);
1292}
1293
1294void
1295bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
1296{
1297 bfa_wc_down(&port->chld_stop_wc);
1298}
1299
1300void
1301bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
1302{
1303 bfa_wc_down(&port->chld_stop_wc);
1304}
1305
1306int
1307bna_port_mtu_get(struct bna_port *port)
1308{
1309 return port->mtu;
1310}
1311
1312void
1313bna_port_enable(struct bna_port *port)
1314{
1315 if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
1316 return;
1317
1318 port->flags |= BNA_PORT_F_ENABLED;
1319
1320 if (port->flags & BNA_PORT_F_DEVICE_READY)
1321 bfa_fsm_send_event(port, PORT_E_START);
1322}
1323
1324void
1325bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
1326 void (*cbfn)(void *, enum bna_cb_status))
1327{
1328 if (type == BNA_SOFT_CLEANUP) {
1329 (*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
1330 return;
1331 }
1332
1333 port->stop_cbfn = cbfn;
1334 port->stop_cbarg = port->bna->bnad;
1335
1336 port->flags &= ~BNA_PORT_F_ENABLED;
1337
1338 bfa_fsm_send_event(port, PORT_E_STOP);
1339}
1340
1341void
1342bna_port_pause_config(struct bna_port *port,
1343 struct bna_pause_config *pause_config,
1344 void (*cbfn)(struct bnad *, enum bna_cb_status))
1345{
1346 port->pause_config = *pause_config;
1347
1348 port->pause_cbfn = cbfn;
1349
1350 bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
1351}
1352
1353void
1354bna_port_mtu_set(struct bna_port *port, int mtu,
1355 void (*cbfn)(struct bnad *, enum bna_cb_status))
1356{
1357 port->mtu = mtu;
1358
1359 port->mtu_cbfn = cbfn;
1360
1361 bfa_fsm_send_event(port, PORT_E_MTU_CFG);
1362}
1363
1364void
1365bna_port_mac_get(struct bna_port *port, mac_t *mac)
1366{
1367 *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
1368}
1369
1370/**
1371 * DEVICE
1372 */
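/*
 * enable_mbox_intr() samples the interrupt status and lets the bnad layer
 * install its mailbox handler before calling bna_mbox_intr_enable();
 * disable_mbox_intr() reverses the order, masking first and notifying
 * bnad afterwards.
 */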
1373#define enable_mbox_intr(_device)\
1374do {\
1375 u32 intr_status;\
1376 bna_intr_status_get((_device)->bna, intr_status);\
1377 bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
1378 bna_mbox_intr_enable((_device)->bna);\
1379} while (0)
1380
1381#define disable_mbox_intr(_device)\
1382do {\
1383 bna_mbox_intr_disable((_device)->bna);\
1384 bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
1385} while (0)
1386
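/*
 * Per-PCI-function register offsets: host page number, interrupt status,
 * interrupt mask and MSI-X error index, indexed by pci_func (see bna_init()).
 */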
1387static const struct bna_chip_regs_offset reg_offset[] =
1388{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
1389 HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
1390{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
1391 HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
1392{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
1393 HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
1394{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
1395 HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
1396};
1397
1398enum bna_device_event {
1399 DEVICE_E_ENABLE = 1,
1400 DEVICE_E_DISABLE = 2,
1401 DEVICE_E_IOC_READY = 3,
1402 DEVICE_E_IOC_FAILED = 4,
1403 DEVICE_E_IOC_DISABLED = 5,
1404 DEVICE_E_IOC_RESET = 6,
1405 DEVICE_E_PORT_STOPPED = 7,
1406};
1407
1408enum bna_device_state {
1409 BNA_DEVICE_STOPPED = 1,
1410 BNA_DEVICE_IOC_READY_WAIT = 2,
1411 BNA_DEVICE_READY = 3,
1412 BNA_DEVICE_PORT_STOP_WAIT = 4,
1413 BNA_DEVICE_IOC_DISABLE_WAIT = 5,
1414 BNA_DEVICE_FAILED = 6
1415};
1416
1417bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
1418 enum bna_device_event);
1419bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
1420 enum bna_device_event);
1421bfa_fsm_state_decl(bna_device, ready, struct bna_device,
1422 enum bna_device_event);
1423bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
1424 enum bna_device_event);
1425bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
1426 enum bna_device_event);
1427bfa_fsm_state_decl(bna_device, failed, struct bna_device,
1428 enum bna_device_event);
1429
1430static struct bfa_sm_table device_sm_table[] = {
1431 {BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
1432 {BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
1433 {BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
1434 {BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
1435 {BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
1436 {BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
1437};
1438
1439static void
1440bna_device_sm_stopped_entry(struct bna_device *device)
1441{
1442 if (device->stop_cbfn)
1443 device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);
1444
1445 device->stop_cbfn = NULL;
1446 device->stop_cbarg = NULL;
1447}
1448
1449static void
1450bna_device_sm_stopped(struct bna_device *device,
1451 enum bna_device_event event)
1452{
1453 switch (event) {
1454 case DEVICE_E_ENABLE:
1455 if (device->intr_type == BNA_INTR_T_MSIX)
1456 bna_mbox_msix_idx_set(device);
1457 bfa_nw_ioc_enable(&device->ioc);
1458 bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
1459 break;
1460
1461 case DEVICE_E_DISABLE:
1462 bfa_fsm_set_state(device, bna_device_sm_stopped);
1463 break;
1464
1465 case DEVICE_E_IOC_RESET:
1466 enable_mbox_intr(device);
1467 break;
1468
1469 case DEVICE_E_IOC_FAILED:
1470 bfa_fsm_set_state(device, bna_device_sm_failed);
1471 break;
1472
1473 default:
1474 bfa_sm_fault(event);
1475 }
1476}
1477
1478static void
1479bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
1480{
1481 /**
1482 * Do not call bfa_ioc_enable() here. It must be called in the
1483 * previous state due to failed -> ioc_ready_wait transition.
1484 */
1485}
1486
1487static void
1488bna_device_sm_ioc_ready_wait(struct bna_device *device,
1489 enum bna_device_event event)
1490{
1491 switch (event) {
1492 case DEVICE_E_DISABLE:
1493 if (device->ready_cbfn)
1494 device->ready_cbfn(device->ready_cbarg,
1495 BNA_CB_INTERRUPT);
1496 device->ready_cbfn = NULL;
1497 device->ready_cbarg = NULL;
1498 bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
1499 break;
1500
1501 case DEVICE_E_IOC_READY:
1502 bfa_fsm_set_state(device, bna_device_sm_ready);
1503 break;
1504
1505 case DEVICE_E_IOC_FAILED:
1506 bfa_fsm_set_state(device, bna_device_sm_failed);
1507 break;
1508
1509 case DEVICE_E_IOC_RESET:
1510 enable_mbox_intr(device);
1511 break;
1512
1513 default:
1514 bfa_sm_fault(event);
1515 }
1516}
1517
1518static void
1519bna_device_sm_ready_entry(struct bna_device *device)
1520{
1521 bna_mbox_mod_start(&device->bna->mbox_mod);
1522 bna_port_start(&device->bna->port);
1523
1524 if (device->ready_cbfn)
1525 device->ready_cbfn(device->ready_cbarg,
1526 BNA_CB_SUCCESS);
1527 device->ready_cbfn = NULL;
1528 device->ready_cbarg = NULL;
1529}
1530
1531static void
1532bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
1533{
1534 switch (event) {
1535 case DEVICE_E_DISABLE:
1536 bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
1537 break;
1538
1539 case DEVICE_E_IOC_FAILED:
1540 bfa_fsm_set_state(device, bna_device_sm_failed);
1541 break;
1542
1543 default:
1544 bfa_sm_fault(event);
1545 }
1546}
1547
1548static void
1549bna_device_sm_port_stop_wait_entry(struct bna_device *device)
1550{
1551 bna_port_stop(&device->bna->port);
1552}
1553
1554static void
1555bna_device_sm_port_stop_wait(struct bna_device *device,
1556 enum bna_device_event event)
1557{
1558 switch (event) {
1559 case DEVICE_E_PORT_STOPPED:
1560 bna_mbox_mod_stop(&device->bna->mbox_mod);
1561 bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
1562 break;
1563
1564 case DEVICE_E_IOC_FAILED:
1565 disable_mbox_intr(device);
1566 bna_port_fail(&device->bna->port);
1567 break;
1568
1569 default:
1570 bfa_sm_fault(event);
1571 }
1572}
1573
1574static void
1575bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
1576{
1577 bfa_nw_ioc_disable(&device->ioc);
1578}
1579
1580static void
1581bna_device_sm_ioc_disable_wait(struct bna_device *device,
1582 enum bna_device_event event)
1583{
1584 switch (event) {
1585 case DEVICE_E_IOC_DISABLED:
1586 disable_mbox_intr(device);
1587 bfa_fsm_set_state(device, bna_device_sm_stopped);
1588 break;
1589
1590 default:
1591 bfa_sm_fault(event);
1592 }
1593}
1594
1595static void
1596bna_device_sm_failed_entry(struct bna_device *device)
1597{
1598 disable_mbox_intr(device);
1599 bna_port_fail(&device->bna->port);
1600 bna_mbox_mod_stop(&device->bna->mbox_mod);
1601
1602 if (device->ready_cbfn)
1603 device->ready_cbfn(device->ready_cbarg,
1604 BNA_CB_FAIL);
1605 device->ready_cbfn = NULL;
1606 device->ready_cbarg = NULL;
1607}
1608
1609static void
1610bna_device_sm_failed(struct bna_device *device,
1611 enum bna_device_event event)
1612{
1613 switch (event) {
1614 case DEVICE_E_DISABLE:
1615 bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
1616 break;
1617
1618 case DEVICE_E_IOC_RESET:
1619 enable_mbox_intr(device);
1620 bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
1621 break;
1622
1623 default:
1624 bfa_sm_fault(event);
1625 }
1626}
1627
1628/* IOC callback functions */
1629
1630static void
1631bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
1632{
1633 struct bna_device *device = (struct bna_device *)dev;
1634
1635 if (error)
1636 bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
1637 else
1638 bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
1639}
1640
1641static void
1642bna_device_cb_iocll_disabled(void *dev)
1643{
1644 struct bna_device *device = (struct bna_device *)dev;
1645
1646 bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
1647}
1648
1649static void
1650bna_device_cb_iocll_failed(void *dev)
1651{
1652 struct bna_device *device = (struct bna_device *)dev;
1653
1654 bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
1655}
1656
1657static void
1658bna_device_cb_iocll_reset(void *dev)
1659{
1660 struct bna_device *device = (struct bna_device *)dev;
1661
1662 bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
1663}
1664
1665static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
1666 bna_device_cb_iocll_ready,
1667 bna_device_cb_iocll_disabled,
1668 bna_device_cb_iocll_failed,
1669 bna_device_cb_iocll_reset
1670};
1671
1672/* device */
1673static void
1674bna_adv_device_init(struct bna_device *device, struct bna *bna,
1675 struct bna_res_info *res_info)
1676{
1677 u8 *kva;
1678 u64 dma;
1679
1680 device->bna = bna;
1681
1682 kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1683
1684 /**
1685 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1686 * DMA memory.
1687 */
1688 BNA_GET_DMA_ADDR(
1689 &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1690 kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1691
1692 bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
1693 bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1694 kva += bfa_nw_cee_meminfo();
1695 dma += bfa_nw_cee_meminfo();
1696
1697}
1698
1699static void
1700bna_device_init(struct bna_device *device, struct bna *bna,
1701 struct bna_res_info *res_info)
1702{
1703 u64 dma;
1704
1705 device->bna = bna;
1706
1707 /**
1708 * Attach IOC and claim:
1709 * 1. DMA memory for IOC attributes
1710 * 2. Kernel memory for FW trace
1711 */
1712 bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
1713 bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
1714
1715 BNA_GET_DMA_ADDR(
1716 &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
1717 bfa_nw_ioc_mem_claim(&device->ioc,
1718 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
1719 dma);
1720
1721 bna_adv_device_init(device, bna, res_info);
1722 /*
1723 * Initialize mbox_mod only after IOC, so that mbox handler
1724 * registration goes through
1725 */
1726 device->intr_type =
1727 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
1728 device->vector =
1729 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
1730 bna_mbox_mod_init(&bna->mbox_mod, bna);
1731
1732 device->ready_cbfn = device->stop_cbfn = NULL;
1733 device->ready_cbarg = device->stop_cbarg = NULL;
1734
1735 bfa_fsm_set_state(device, bna_device_sm_stopped);
1736}
1737
1738static void
1739bna_device_uninit(struct bna_device *device)
1740{
1741 bna_mbox_mod_uninit(&device->bna->mbox_mod);
1742
1743 bfa_nw_ioc_detach(&device->ioc);
1744
1745 device->bna = NULL;
1746}
1747
1748static void
1749bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
1750{
1751 struct bna_device *device = (struct bna_device *)arg;
1752
1753 bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
1754}
1755
1756static int
1757bna_device_status_get(struct bna_device *device)
1758{
1759 return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
1760}
1761
1762void
1763bna_device_enable(struct bna_device *device)
1764{
1765 if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
1766 bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
1767 return;
1768 }
1769
1770 device->ready_cbfn = bnad_cb_device_enabled;
1771 device->ready_cbarg = device->bna->bnad;
1772
1773 bfa_fsm_send_event(device, DEVICE_E_ENABLE);
1774}
1775
1776void
1777bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
1778{
1779 if (type == BNA_SOFT_CLEANUP) {
1780 bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
1781 return;
1782 }
1783
1784 device->stop_cbfn = bnad_cb_device_disabled;
1785 device->stop_cbarg = device->bna->bnad;
1786
1787 bfa_fsm_send_event(device, DEVICE_E_DISABLE);
1788}
1789
1790static int
1791bna_device_state_get(struct bna_device *device)
1792{
1793 return bfa_sm_to_state(device_sm_table, device->fsm);
1794}
1795
1796const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
1797 {12, 12},
1798 {6, 10},
1799 {5, 10},
1800 {4, 8},
1801 {3, 6},
1802 {3, 6},
1803 {2, 4},
1804 {1, 2},
1805};
1806
1807/* utils */
1808
1809static void
1810bna_adv_res_req(struct bna_res_info *res_info)
1811{
1812 /* DMA memory for COMMON_MODULE */
1813 res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
1814 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1815 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
1816 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
1817 bfa_nw_cee_meminfo(), PAGE_SIZE);
1818
1819	/* Virtual memory for retrieving fw_trc */
1820 res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
1821 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1822 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
1823 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
1824
1825	/* DMA memory for retrieving stats */
1826 res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
1827 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1828 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
1829 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
1830 ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);
1831
1832 /* Virtual memory for soft stats */
1833 res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
1834 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1835 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
1836 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
1837 sizeof(struct bna_sw_stats);
1838}
1839
1840static void
1841bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
1842{
1843 struct bna_tx *tx;
1844 struct bna_txq *txq;
1845 struct bna_rx *rx;
1846 struct bna_rxp *rxp;
1847 struct list_head *qe;
1848 struct list_head *txq_qe;
1849 struct list_head *rxp_qe;
1850 struct list_head *mac_qe;
1851 int i;
1852
1853 sw_stats->device_state = bna_device_state_get(&bna->device);
1854 sw_stats->port_state = bna_port_state_get(&bna->port);
1855 sw_stats->port_flags = bna->port.flags;
1856 sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
1857 sw_stats->priority = bna->port.priority;
1858
1859 i = 0;
1860 list_for_each(qe, &bna->tx_mod.tx_active_q) {
1861 tx = (struct bna_tx *)qe;
1862 sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
1863 sw_stats->tx_stats[i].tx_flags = tx->flags;
1864
1865 sw_stats->tx_stats[i].num_txqs = 0;
1866 sw_stats->tx_stats[i].txq_bmap[0] = 0;
1867 sw_stats->tx_stats[i].txq_bmap[1] = 0;
1868 list_for_each(txq_qe, &tx->txq_q) {
1869 txq = (struct bna_txq *)txq_qe;
1870 if (txq->txq_id < 32)
1871 sw_stats->tx_stats[i].txq_bmap[0] |=
1872 ((u32)1 << txq->txq_id);
1873 else
1874 sw_stats->tx_stats[i].txq_bmap[1] |=
1875 ((u32)
1876 1 << (txq->txq_id - 32));
1877 sw_stats->tx_stats[i].num_txqs++;
1878 }
1879
1880 sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;
1881
1882 i++;
1883 }
1884 sw_stats->num_active_tx = i;
1885
1886 i = 0;
1887 list_for_each(qe, &bna->rx_mod.rx_active_q) {
1888 rx = (struct bna_rx *)qe;
1889 sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
1890 sw_stats->rx_stats[i].rx_flags = rx->rx_flags;
1891
1892 sw_stats->rx_stats[i].num_rxps = 0;
1893 sw_stats->rx_stats[i].num_rxqs = 0;
1894 sw_stats->rx_stats[i].rxq_bmap[0] = 0;
1895 sw_stats->rx_stats[i].rxq_bmap[1] = 0;
1896 sw_stats->rx_stats[i].cq_bmap[0] = 0;
1897 sw_stats->rx_stats[i].cq_bmap[1] = 0;
1898 list_for_each(rxp_qe, &rx->rxp_q) {
1899 rxp = (struct bna_rxp *)rxp_qe;
1900
1901 sw_stats->rx_stats[i].num_rxqs += 1;
1902
1903 if (rxp->type == BNA_RXP_SINGLE) {
1904 if (rxp->rxq.single.only->rxq_id < 32) {
1905 sw_stats->rx_stats[i].rxq_bmap[0] |=
1906 ((u32)1 <<
1907 rxp->rxq.single.only->rxq_id);
1908 } else {
1909 sw_stats->rx_stats[i].rxq_bmap[1] |=
1910 ((u32)1 <<
1911 (rxp->rxq.single.only->rxq_id - 32));
1912 }
1913 } else {
1914 if (rxp->rxq.slr.large->rxq_id < 32) {
1915 sw_stats->rx_stats[i].rxq_bmap[0] |=
1916 ((u32)1 <<
1917 rxp->rxq.slr.large->rxq_id);
1918 } else {
1919 sw_stats->rx_stats[i].rxq_bmap[1] |=
1920 ((u32)1 <<
1921 (rxp->rxq.slr.large->rxq_id - 32));
1922 }
1923
1924 if (rxp->rxq.slr.small->rxq_id < 32) {
1925 sw_stats->rx_stats[i].rxq_bmap[0] |=
1926 ((u32)1 <<
1927 rxp->rxq.slr.small->rxq_id);
1928 } else {
1929 sw_stats->rx_stats[i].rxq_bmap[1] |=
1930 ((u32)1 <<
1931 (rxp->rxq.slr.small->rxq_id - 32));
1932 }
1933 sw_stats->rx_stats[i].num_rxqs += 1;
1934 }
1935
1936 if (rxp->cq.cq_id < 32)
1937 sw_stats->rx_stats[i].cq_bmap[0] |=
1938 (1 << rxp->cq.cq_id);
1939 else
1940 sw_stats->rx_stats[i].cq_bmap[1] |=
1941 (1 << (rxp->cq.cq_id - 32));
1942
1943 sw_stats->rx_stats[i].num_rxps++;
1944 }
1945
1946 sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
1947 sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
1948 sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;
1949
1950 sw_stats->rx_stats[i].num_active_ucast = 0;
1951 if (rx->rxf.ucast_active_mac)
1952 sw_stats->rx_stats[i].num_active_ucast++;
1953 list_for_each(mac_qe, &rx->rxf.ucast_active_q)
1954 sw_stats->rx_stats[i].num_active_ucast++;
1955
1956 sw_stats->rx_stats[i].num_active_mcast = 0;
1957 list_for_each(mac_qe, &rx->rxf.mcast_active_q)
1958 sw_stats->rx_stats[i].num_active_mcast++;
1959
1960 sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
1961 sw_stats->rx_stats[i].vlan_filter_status =
1962 rx->rxf.vlan_filter_status;
1963 memcpy(sw_stats->rx_stats[i].vlan_filter_table,
1964 rx->rxf.vlan_filter_table,
1965 sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));
1966
1967 sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
1968 sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;
1969
1970 i++;
1971 }
1972 sw_stats->num_active_rx = i;
1973}
1974
1975static void
1976bna_fw_cb_stats_get(void *arg, int status)
1977{
1978 struct bna *bna = (struct bna *)arg;
1979 u64 *p_stats;
1980 int i, count;
1981 int rxf_count, txf_count;
1982 u64 rxf_bmap, txf_bmap;
1983
1984 bfa_q_qe_init(&bna->mbox_qe.qe);
1985
1986 if (status == 0) {
1987 p_stats = (u64 *)bna->stats.hw_stats;
1988 count = sizeof(struct bfi_ll_stats) / sizeof(u64);
1989 for (i = 0; i < count; i++)
1990 p_stats[i] = cpu_to_be64(p_stats[i]);
1991
1992 rxf_count = 0;
1993 rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
1994 ((u64)bna->stats.rxf_bmap[1] << 32);
1995 for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
1996 if (rxf_bmap & ((u64)1 << i))
1997 rxf_count++;
1998
1999 txf_count = 0;
2000 txf_bmap = (u64)bna->stats.txf_bmap[0] |
2001 ((u64)bna->stats.txf_bmap[1] << 32);
2002 for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
2003 if (txf_bmap & ((u64)1 << i))
2004 txf_count++;
2005
2006 p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
2007 ((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
2008 txf_count * sizeof(struct bfi_ll_stats_txf))/
2009 sizeof(u64));
2010
2011 /* Populate the TXF stats from the firmware DMAed copy */
2012 for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
2013 if (txf_bmap & ((u64)1 << i)) {
2014 p_stats -= sizeof(struct bfi_ll_stats_txf)/
2015 sizeof(u64);
2016 memcpy(&bna->stats.hw_stats->txf_stats[i],
2017 p_stats,
2018 sizeof(struct bfi_ll_stats_txf));
2019 }
2020
2021 /* Populate the RXF stats from the firmware DMAed copy */
2022 for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
2023 if (rxf_bmap & ((u64)1 << i)) {
2024 p_stats -= sizeof(struct bfi_ll_stats_rxf)/
2025 sizeof(u64);
2026 memcpy(&bna->stats.hw_stats->rxf_stats[i],
2027 p_stats,
2028 sizeof(struct bfi_ll_stats_rxf));
2029 }
2030
2031 bna_sw_stats_get(bna, bna->stats.sw_stats);
2032 bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
2033 } else
2034 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2035}
2036
2037static void
2038bna_fw_stats_get(struct bna *bna)
2039{
2040 struct bfi_ll_stats_req ll_req;
2041
2042 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
2043 ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
2044
2045 ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
2046 ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
2047 ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
2048 ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);
2049
2050 ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
2051 ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;
2052
2053 bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
2054 bna_fw_cb_stats_get, bna);
2055 bna_mbox_send(bna, &bna->mbox_qe);
2056
2057 bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
2058 bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
2059 bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
2060 bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
2061}
2062
2063void
2064bna_stats_get(struct bna *bna)
2065{
2066 if (bna_device_status_get(&bna->device))
2067 bna_fw_stats_get(bna);
2068 else
2069 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2070}
2071
2072/* IB */
2073static void
2074bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
2075{
2076 ib->ib_config.coalescing_timeo = coalescing_timeo;
2077
2078 if (ib->start_count)
2079 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
2080 (u32)ib->ib_config.coalescing_timeo, 0);
2081}
2082
2083/* RxF */
2084void
2085bna_rxf_adv_init(struct bna_rxf *rxf,
2086 struct bna_rx *rx,
2087 struct bna_rx_config *q_config)
2088{
2089 switch (q_config->rxp_type) {
2090 case BNA_RXP_SINGLE:
2091 /* No-op */
2092 break;
2093 case BNA_RXP_SLR:
2094 rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
2095 break;
2096 case BNA_RXP_HDS:
2097 rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
2098 rxf->hds_cfg.header_size =
2099 q_config->hds_config.header_size;
2100 rxf->forced_offset = 0;
2101 break;
2102 default:
2103 break;
2104 }
2105
2106 if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
2107 rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
2108 rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
2109 rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
2110 memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
2111 &q_config->rss_config.toeplitz_hash_key[0],
2112 sizeof(rxf->rss_cfg.toeplitz_hash_key));
2113 }
2114}
2115
2116static void
2117rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
2118{
2119 struct bfi_ll_rxf_req req;
2120
2121 bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
2122
2123 req.rxf_id = rxf->rxf_id;
2124 req.enable = status;
2125
2126 bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
2127 rxf_cb_cam_fltr_mbox_cmd, rxf);
2128
2129 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
2130}
2131
2132int
2133rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
2134{
2135 struct bna_mac *mac = NULL;
2136 struct list_head *qe;
2137
2138 /* Add additional MAC entries */
2139 if (!list_empty(&rxf->ucast_pending_add_q)) {
2140 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
2141 bfa_q_qe_init(qe);
2142 mac = (struct bna_mac *)qe;
2143 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
2144 list_add_tail(&mac->qe, &rxf->ucast_active_q);
2145 return 1;
2146 }
2147
2148	/* Delete MAC addresses previously added */
2149 if (!list_empty(&rxf->ucast_pending_del_q)) {
2150 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
2151 bfa_q_qe_init(qe);
2152 mac = (struct bna_mac *)qe;
2153 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
2154 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2155 return 1;
2156 }
2157
2158 return 0;
2159}
2160
2161int
2162rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
2163{
2164 struct bna *bna = rxf->rx->bna;
2165
2166 /* Enable/disable promiscuous mode */
2167 if (is_promisc_enable(rxf->rxmode_pending,
2168 rxf->rxmode_pending_bitmask)) {
2169 /* move promisc configuration from pending -> active */
2170 promisc_inactive(rxf->rxmode_pending,
2171 rxf->rxmode_pending_bitmask);
2172 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
2173
2174 /* Disable VLAN filter to allow all VLANs */
2175 __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
2176 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2177 BNA_STATUS_T_ENABLED);
2178 return 1;
2179 } else if (is_promisc_disable(rxf->rxmode_pending,
2180 rxf->rxmode_pending_bitmask)) {
2181 /* move promisc configuration from pending -> active */
2182 promisc_inactive(rxf->rxmode_pending,
2183 rxf->rxmode_pending_bitmask);
2184 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2185 bna->rxf_promisc_id = BFI_MAX_RXF;
2186
2187 /* Revert VLAN filter */
2188 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2189 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2190 BNA_STATUS_T_DISABLED);
2191 return 1;
2192 }
2193
2194 return 0;
2195}
2196
2197int
2198rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
2199{
2200 /* Enable/disable allmulti mode */
2201 if (is_allmulti_enable(rxf->rxmode_pending,
2202 rxf->rxmode_pending_bitmask)) {
2203 /* move allmulti configuration from pending -> active */
2204 allmulti_inactive(rxf->rxmode_pending,
2205 rxf->rxmode_pending_bitmask);
2206 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
2207
2208 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2209 BNA_STATUS_T_ENABLED);
2210 return 1;
2211 } else if (is_allmulti_disable(rxf->rxmode_pending,
2212 rxf->rxmode_pending_bitmask)) {
2213 /* move allmulti configuration from pending -> active */
2214 allmulti_inactive(rxf->rxmode_pending,
2215 rxf->rxmode_pending_bitmask);
2216 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2217
2218 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2219 BNA_STATUS_T_DISABLED);
2220 return 1;
2221 }
2222
2223 return 0;
2224}
2225
2226int
2227rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
2228{
2229 struct bna_mac *mac = NULL;
2230 struct list_head *qe;
2231
2232 /* 1. delete pending ucast entries */
2233 if (!list_empty(&rxf->ucast_pending_del_q)) {
2234 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
2235 bfa_q_qe_init(qe);
2236 mac = (struct bna_mac *)qe;
2237 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
2238 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2239 return 1;
2240 }
2241
2242 /* 2. clear active ucast entries; move them to pending_add_q */
2243 if (!list_empty(&rxf->ucast_active_q)) {
2244 bfa_q_deq(&rxf->ucast_active_q, &qe);
2245 bfa_q_qe_init(qe);
2246 mac = (struct bna_mac *)qe;
2247 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
2248 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
2249 return 1;
2250 }
2251
2252 return 0;
2253}
2254
2255int
2256rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
2257{
2258 struct bna *bna = rxf->rx->bna;
2259
2260 /* 6. Execute pending promisc mode disable command */
2261 if (is_promisc_disable(rxf->rxmode_pending,
2262 rxf->rxmode_pending_bitmask)) {
2263 /* move promisc configuration from pending -> active */
2264 promisc_inactive(rxf->rxmode_pending,
2265 rxf->rxmode_pending_bitmask);
2266 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2267 bna->rxf_promisc_id = BFI_MAX_RXF;
2268
2269 /* Revert VLAN filter */
2270 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2271 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2272 BNA_STATUS_T_DISABLED);
2273 return 1;
2274 }
2275
2276 /* 7. Clear active promisc mode; move it to pending enable */
2277 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
2278 /* move promisc configuration from active -> pending */
2279 promisc_enable(rxf->rxmode_pending,
2280 rxf->rxmode_pending_bitmask);
2281 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2282
2283 /* Revert VLAN filter */
2284 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2285 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2286 BNA_STATUS_T_DISABLED);
2287 return 1;
2288 }
2289
2290 return 0;
2291}
2292
2293int
2294rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
2295{
2296 /* 10. Execute pending allmulti mode disable command */
2297 if (is_allmulti_disable(rxf->rxmode_pending,
2298 rxf->rxmode_pending_bitmask)) {
2299 /* move allmulti configuration from pending -> active */
2300 allmulti_inactive(rxf->rxmode_pending,
2301 rxf->rxmode_pending_bitmask);
2302 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2303 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2304 BNA_STATUS_T_DISABLED);
2305 return 1;
2306 }
2307
2308 /* 11. Clear active allmulti mode; move it to pending enable */
2309 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
2310 /* move allmulti configuration from active -> pending */
2311 allmulti_enable(rxf->rxmode_pending,
2312 rxf->rxmode_pending_bitmask);
2313 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2314 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2315 BNA_STATUS_T_DISABLED);
2316 return 1;
2317 }
2318
2319 return 0;
2320}
2321
2322void
2323rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
2324{
2325 struct list_head *qe;
2326 struct bna_mac *mac;
2327
2328 /* 1. Move active ucast entries to pending_add_q */
2329 while (!list_empty(&rxf->ucast_active_q)) {
2330 bfa_q_deq(&rxf->ucast_active_q, &qe);
2331 bfa_q_qe_init(qe);
2332 list_add_tail(qe, &rxf->ucast_pending_add_q);
2333 }
2334
2335 /* 2. Throw away delete pending ucast entries */
2336 while (!list_empty(&rxf->ucast_pending_del_q)) {
2337 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
2338 bfa_q_qe_init(qe);
2339 mac = (struct bna_mac *)qe;
2340 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2341 }
2342}
2343
2344void
2345rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
2346{
2347 struct bna *bna = rxf->rx->bna;
2348
2349 /* 6. Clear pending promisc mode disable */
2350 if (is_promisc_disable(rxf->rxmode_pending,
2351 rxf->rxmode_pending_bitmask)) {
2352 promisc_inactive(rxf->rxmode_pending,
2353 rxf->rxmode_pending_bitmask);
2354 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2355 bna->rxf_promisc_id = BFI_MAX_RXF;
2356 }
2357
2358 /* 7. Move promisc mode config from active -> pending */
2359 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
2360 promisc_enable(rxf->rxmode_pending,
2361 rxf->rxmode_pending_bitmask);
2362 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2363 }
2364
2365}
2366
2367void
2368rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
2369{
2370 /* 10. Clear pending allmulti mode disable */
2371 if (is_allmulti_disable(rxf->rxmode_pending,
2372 rxf->rxmode_pending_bitmask)) {
2373 allmulti_inactive(rxf->rxmode_pending,
2374 rxf->rxmode_pending_bitmask);
2375 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2376 }
2377
2378 /* 11. Move allmulti mode config from active -> pending */
2379 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
2380 allmulti_enable(rxf->rxmode_pending,
2381 rxf->rxmode_pending_bitmask);
2382 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2383 }
2384}
2385
2386/**
2387 * Should only be called by bna_rxf_mode_set.
2388 * Helps decide whether h/w configuration is needed.
2389 * Returns:
2390 * 0 = no h/w change
2391 * 1 = need h/w change
2392 */
2393static int
2394rxf_promisc_enable(struct bna_rxf *rxf)
2395{
2396 struct bna *bna = rxf->rx->bna;
2397 int ret = 0;
2398
2399 /* There can not be any pending disable command */
2400
2401 /* Do nothing if pending enable or already enabled */
2402 if (is_promisc_enable(rxf->rxmode_pending,
2403 rxf->rxmode_pending_bitmask) ||
2404 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
2405 /* Schedule enable */
2406 } else {
2407 /* Promisc mode should not be active in the system */
2408 promisc_enable(rxf->rxmode_pending,
2409 rxf->rxmode_pending_bitmask);
2410 bna->rxf_promisc_id = rxf->rxf_id;
2411 ret = 1;
2412 }
2413
2414 return ret;
2415}
2416
2417/**
2418 * Should only be called by bna_rxf_mode_set.
2419 * Helps decide whether h/w configuration is needed.
2420 * Returns:
2421 * 0 = no h/w change
2422 * 1 = need h/w change
2423 */
2424static int
2425rxf_promisc_disable(struct bna_rxf *rxf)
2426{
2427 struct bna *bna = rxf->rx->bna;
2428 int ret = 0;
2429
2430 /* There can not be any pending disable */
2431
2432	/* Turn off pending enable command, if any */
2433 if (is_promisc_enable(rxf->rxmode_pending,
2434 rxf->rxmode_pending_bitmask)) {
2435 /* Promisc mode should not be active */
2436 /* system promisc state should be pending */
2437 promisc_inactive(rxf->rxmode_pending,
2438 rxf->rxmode_pending_bitmask);
2439 /* Remove the promisc state from the system */
2440 bna->rxf_promisc_id = BFI_MAX_RXF;
2441
2442 /* Schedule disable */
2443 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
2444 /* Promisc mode should be active in the system */
2445 promisc_disable(rxf->rxmode_pending,
2446 rxf->rxmode_pending_bitmask);
2447 ret = 1;
2448
2449 /* Do nothing if already disabled */
2450 } else {
2451 }
2452
2453 return ret;
2454}
2455
2456/**
2457 * Should only be called by bna_rxf_mode_set.
2458 * Helps decide whether h/w configuration is needed.
2459 * Returns:
2460 * 0 = no h/w change
2461 * 1 = need h/w change
2462 */
2463static int
2464rxf_allmulti_enable(struct bna_rxf *rxf)
2465{
2466 int ret = 0;
2467
2468 /* There can not be any pending disable command */
2469
2470 /* Do nothing if pending enable or already enabled */
2471 if (is_allmulti_enable(rxf->rxmode_pending,
2472 rxf->rxmode_pending_bitmask) ||
2473 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
2474 /* Schedule enable */
2475 } else {
2476 allmulti_enable(rxf->rxmode_pending,
2477 rxf->rxmode_pending_bitmask);
2478 ret = 1;
2479 }
2480
2481 return ret;
2482}
2483
2484/**
2485 * Should only be called by bna_rxf_mode_set.
2486 * Helps decide whether h/w configuration is needed.
2487 * Returns:
2488 * 0 = no h/w change
2489 * 1 = need h/w change
2490 */
2491static int
2492rxf_allmulti_disable(struct bna_rxf *rxf)
2493{
2494 int ret = 0;
2495
2496 /* There can not be any pending disable */
2497
2498	/* Turn off pending enable command, if any */
2499 if (is_allmulti_enable(rxf->rxmode_pending,
2500 rxf->rxmode_pending_bitmask)) {
2501 /* Allmulti mode should not be active */
2502 allmulti_inactive(rxf->rxmode_pending,
2503 rxf->rxmode_pending_bitmask);
2504
2505 /* Schedule disable */
2506 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
2507 allmulti_disable(rxf->rxmode_pending,
2508 rxf->rxmode_pending_bitmask);
2509 ret = 1;
2510 }
2511
2512 return ret;
2513}
2514
2515/* RxF <- bnad */
2516enum bna_cb_status
2517bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2518 enum bna_rxmode bitmask,
2519 void (*cbfn)(struct bnad *, struct bna_rx *,
2520 enum bna_cb_status))
2521{
2522 struct bna_rxf *rxf = &rx->rxf;
2523 int need_hw_config = 0;
2524
2525 /* Process the commands */
2526
2527 if (is_promisc_enable(new_mode, bitmask)) {
2528 /* If promisc mode is already enabled elsewhere in the system */
2529 if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
2530 (rx->bna->rxf_promisc_id != rxf->rxf_id))
2531 goto err_return;
2532 if (rxf_promisc_enable(rxf))
2533 need_hw_config = 1;
2534 } else if (is_promisc_disable(new_mode, bitmask)) {
2535 if (rxf_promisc_disable(rxf))
2536 need_hw_config = 1;
2537 }
2538
2539 if (is_allmulti_enable(new_mode, bitmask)) {
2540 if (rxf_allmulti_enable(rxf))
2541 need_hw_config = 1;
2542 } else if (is_allmulti_disable(new_mode, bitmask)) {
2543 if (rxf_allmulti_disable(rxf))
2544 need_hw_config = 1;
2545 }
2546
2547 /* Trigger h/w if needed */
2548
2549 if (need_hw_config) {
2550 rxf->cam_fltr_cbfn = cbfn;
2551 rxf->cam_fltr_cbarg = rx->bna->bnad;
2552 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2553 } else if (cbfn)
2554 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2555
2556 return BNA_CB_SUCCESS;
2557
2558err_return:
2559 return BNA_CB_FAIL;
2560}
2561
2562/* RxF <- bnad */
2563void
2564bna_rx_vlanfilter_enable(struct bna_rx *rx)
2565{
2566 struct bna_rxf *rxf = &rx->rxf;
2567
2568 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2569 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
2570 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2571 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2572 }
2573}
2574
2575/* Rx */
2576
2577/* Rx <- bnad */
2578void
2579bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2580{
2581 struct bna_rxp *rxp;
2582 struct list_head *qe;
2583
2584 list_for_each(qe, &rx->rxp_q) {
2585 rxp = (struct bna_rxp *)qe;
2586 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2587 bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
2588 }
2589}
2590
2591/* Rx <- bnad */
2592void
2593bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2594{
2595 int i, j;
2596
2597 for (i = 0; i < BNA_LOAD_T_MAX; i++)
2598 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2599 bna->rx_mod.dim_vector[i][j] = vector[i][j];
2600}
2601
2602/* Rx <- bnad */
2603void
2604bna_rx_dim_update(struct bna_ccb *ccb)
2605{
2606 struct bna *bna = ccb->cq->rx->bna;
2607 u32 load, bias;
2608 u32 pkt_rt, small_rt, large_rt;
2609 u8 coalescing_timeo;
2610
2611 if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2612 (ccb->pkt_rate.large_pkt_cnt == 0))
2613 return;
2614
2615 /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2616
2617 small_rt = ccb->pkt_rate.small_pkt_cnt;
2618 large_rt = ccb->pkt_rate.large_pkt_cnt;
2619
2620 pkt_rt = small_rt + large_rt;
2621
2622 if (pkt_rt < BNA_PKT_RATE_10K)
2623 load = BNA_LOAD_T_LOW_4;
2624 else if (pkt_rt < BNA_PKT_RATE_20K)
2625 load = BNA_LOAD_T_LOW_3;
2626 else if (pkt_rt < BNA_PKT_RATE_30K)
2627 load = BNA_LOAD_T_LOW_2;
2628 else if (pkt_rt < BNA_PKT_RATE_40K)
2629 load = BNA_LOAD_T_LOW_1;
2630 else if (pkt_rt < BNA_PKT_RATE_50K)
2631 load = BNA_LOAD_T_HIGH_1;
2632 else if (pkt_rt < BNA_PKT_RATE_60K)
2633 load = BNA_LOAD_T_HIGH_2;
2634 else if (pkt_rt < BNA_PKT_RATE_80K)
2635 load = BNA_LOAD_T_HIGH_3;
2636 else
2637 load = BNA_LOAD_T_HIGH_4;
2638
2639 if (small_rt > (large_rt << 1))
2640 bias = 0;
2641 else
2642 bias = 1;
2643
2644 ccb->pkt_rate.small_pkt_cnt = 0;
2645 ccb->pkt_rate.large_pkt_cnt = 0;
2646
2647 coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2648 ccb->rx_coalescing_timeo = coalescing_timeo;
2649
2650 /* Set it to IB */
2651 bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
2652}
2653
2654/* Tx */
2655/* TX <- bnad */
2656void
2657bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
2658{
2659 struct bna_txq *txq;
2660 struct list_head *qe;
2661
2662 list_for_each(qe, &tx->txq_q) {
2663 txq = (struct bna_txq *)qe;
2664 bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
2665 }
2666}
2667
2668/*
2669 * Private data
2670 */
2671
2672struct bna_ritseg_pool_cfg {
2673 u32 pool_size;
2674 u32 pool_entry_size;
2675};
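/* Instantiates the static RIT segment pool table (init_ritseg_pool is defined in bna_hw.h) */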
2676init_ritseg_pool(ritseg_pool_cfg);
2677
2678/*
2679 * Private functions
2680 */
2681static void
2682bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
2683 struct bna_res_info *res_info)
2684{
2685 int i;
2686
2687 ucam_mod->ucmac = (struct bna_mac *)
2688 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
2689
2690 INIT_LIST_HEAD(&ucam_mod->free_q);
2691 for (i = 0; i < BFI_MAX_UCMAC; i++) {
2692 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
2693 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
2694 }
2695
2696 ucam_mod->bna = bna;
2697}
2698
2699static void
2700bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
2701{
2702 struct list_head *qe;
2703 int i = 0;
2704
2705 list_for_each(qe, &ucam_mod->free_q)
2706 i++;
2707
2708 ucam_mod->bna = NULL;
2709}
2710
2711static void
2712bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
2713 struct bna_res_info *res_info)
2714{
2715 int i;
2716
2717 mcam_mod->mcmac = (struct bna_mac *)
2718 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
2719
2720 INIT_LIST_HEAD(&mcam_mod->free_q);
2721 for (i = 0; i < BFI_MAX_MCMAC; i++) {
2722 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
2723 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
2724 }
2725
2726 mcam_mod->bna = bna;
2727}
2728
2729static void
2730bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
2731{
2732 struct list_head *qe;
2733 int i = 0;
2734
2735 list_for_each(qe, &mcam_mod->free_q)
2736 i++;
2737
2738 mcam_mod->bna = NULL;
2739}
2740
2741static void
2742bna_rit_mod_init(struct bna_rit_mod *rit_mod,
2743 struct bna_res_info *res_info)
2744{
2745 int i;
2746 int j;
2747 int count;
2748 int offset;
2749
2750 rit_mod->rit = (struct bna_rit_entry *)
2751 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
2752 rit_mod->rit_segment = (struct bna_rit_segment *)
2753 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;
2754
2755 count = 0;
2756 offset = 0;
2757 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
2758 INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
2759 for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
2760 bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
2761 rit_mod->rit_segment[count].max_rit_size =
2762 ritseg_pool_cfg[i].pool_entry_size;
2763 rit_mod->rit_segment[count].rit_offset = offset;
2764 rit_mod->rit_segment[count].rit =
2765 &rit_mod->rit[offset];
2766 list_add_tail(&rit_mod->rit_segment[count].qe,
2767 &rit_mod->rit_seg_pool[i]);
2768 count++;
2769 offset += ritseg_pool_cfg[i].pool_entry_size;
2770 }
2771 }
2772}
2773
2774/*
2775 * Public functions
2776 */
2777
2778/* Called during probe(), before calling bna_init() */
2779void
2780bna_res_req(struct bna_res_info *res_info)
2781{
2782 bna_adv_res_req(res_info);
2783
2784 /* DMA memory for retrieving IOC attributes */
2785 res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
2786 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
2787 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
2788 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
2789 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
2790
2791 /* DMA memory for index segment of an IB */
2792 res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2793 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
2794 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
2795 BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
2796 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;
2797
2798 /* Virtual memory for IB objects - stored by IB module */
2799 res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
2800 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
2801 BNA_MEM_T_KVA;
2802 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
2803 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
2804 BFI_MAX_IB * sizeof(struct bna_ib);
2805
2806 /* Virtual memory for intr objects - stored by IB module */
2807 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
2808 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
2809 BNA_MEM_T_KVA;
2810 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
2811 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
2812 BFI_MAX_IB * sizeof(struct bna_intr);
2813
2814 /* Virtual memory for idx_seg objects - stored by IB module */
2815 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
2816 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
2817 BNA_MEM_T_KVA;
2818 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
2819 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
2820 BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);
2821
2822 /* Virtual memory for Tx objects - stored by Tx module */
2823 res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
2824 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
2825 BNA_MEM_T_KVA;
2826 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
2827 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
2828 BFI_MAX_TXQ * sizeof(struct bna_tx);
2829
2830 /* Virtual memory for TxQ - stored by Tx module */
2831 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
2832 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
2833 BNA_MEM_T_KVA;
2834 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
2835 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
2836 BFI_MAX_TXQ * sizeof(struct bna_txq);
2837
2838 /* Virtual memory for Rx objects - stored by Rx module */
2839 res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
2840 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
2841 BNA_MEM_T_KVA;
2842 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
2843 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
2844 BFI_MAX_RXQ * sizeof(struct bna_rx);
2845
2846 /* Virtual memory for RxPath - stored by Rx module */
2847 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
2848 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
2849 BNA_MEM_T_KVA;
2850 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
2851 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
2852 BFI_MAX_RXQ * sizeof(struct bna_rxp);
2853
2854 /* Virtual memory for RxQ - stored by Rx module */
2855 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
2856 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
2857 BNA_MEM_T_KVA;
2858 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
2859 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
2860 BFI_MAX_RXQ * sizeof(struct bna_rxq);
2861
2862 /* Virtual memory for Unicast MAC address - stored by ucam module */
2863 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
2864 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
2865 BNA_MEM_T_KVA;
2866 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
2867 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
2868 BFI_MAX_UCMAC * sizeof(struct bna_mac);
2869
2870 /* Virtual memory for Multicast MAC address - stored by mcam module */
2871 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
2872 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
2873 BNA_MEM_T_KVA;
2874 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
2875 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
2876 BFI_MAX_MCMAC * sizeof(struct bna_mac);
2877
2878 /* Virtual memory for RIT entries */
2879 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
2880 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
2881 BNA_MEM_T_KVA;
2882 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
2883 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
2884 BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);
2885
2886 /* Virtual memory for RIT segment table */
2887 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
2888 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
2889 BNA_MEM_T_KVA;
2890 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
2891 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
2892 BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);
2893
2894 /* Interrupt resource for mailbox interrupt */
2895 res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
2896 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
2897 BNA_INTR_T_MSIX;
2898 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
2899}
2900
2901/* Called during probe() */
2902void
2903bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
2904 struct bna_res_info *res_info)
2905{
2906 bna->bnad = bnad;
2907 bna->pcidev = *pcidev;
2908
2909 bna->stats.hw_stats = (struct bfi_ll_stats *)
2910 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
2911 bna->hw_stats_dma.msb =
2912 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
2913 bna->hw_stats_dma.lsb =
2914 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
2915 bna->stats.sw_stats = (struct bna_sw_stats *)
2916 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;
2917
2918 bna->regs.page_addr = bna->pcidev.pci_bar_kva +
2919 reg_offset[bna->pcidev.pci_func].page_addr;
2920 bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
2921 reg_offset[bna->pcidev.pci_func].fn_int_status;
2922 bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
2923 reg_offset[bna->pcidev.pci_func].fn_int_mask;
2924
2925 if (bna->pcidev.pci_func < 3)
2926 bna->port_num = 0;
2927 else
2928 bna->port_num = 1;
2929
2930 /* Also initializes diag, cee, sfp, phy_port and mbox_mod */
2931 bna_device_init(&bna->device, bna, res_info);
2932
2933 bna_port_init(&bna->port, bna);
2934
2935 bna_tx_mod_init(&bna->tx_mod, bna, res_info);
2936
2937 bna_rx_mod_init(&bna->rx_mod, bna, res_info);
2938
2939 bna_ib_mod_init(&bna->ib_mod, bna, res_info);
2940
2941 bna_rit_mod_init(&bna->rit_mod, res_info);
2942
2943 bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
2944
2945 bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
2946
2947 bna->rxf_promisc_id = BFI_MAX_RXF;
2948
2949 /* Mbox q element for posting stat request to f/w */
2950 bfa_q_qe_init(&bna->mbox_qe.qe);
2951}
2952
2953void
2954bna_uninit(struct bna *bna)
2955{
2956 bna_mcam_mod_uninit(&bna->mcam_mod);
2957
2958 bna_ucam_mod_uninit(&bna->ucam_mod);
2959
2960 bna_ib_mod_uninit(&bna->ib_mod);
2961
2962 bna_rx_mod_uninit(&bna->rx_mod);
2963
2964 bna_tx_mod_uninit(&bna->tx_mod);
2965
2966 bna_port_uninit(&bna->port);
2967
2968 bna_device_uninit(&bna->device);
2969
2970 bna->bnad = NULL;
2971}
2972
2973struct bna_mac *
2974bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
2975{
2976 struct list_head *qe;
2977
2978 if (list_empty(&ucam_mod->free_q))
2979 return NULL;
2980
2981 bfa_q_deq(&ucam_mod->free_q, &qe);
2982
2983 return (struct bna_mac *)qe;
2984}
2985
2986void
2987bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
2988{
2989 list_add_tail(&mac->qe, &ucam_mod->free_q);
2990}
2991
2992struct bna_mac *
2993bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
2994{
2995 struct list_head *qe;
2996
2997 if (list_empty(&mcam_mod->free_q))
2998 return NULL;
2999
3000 bfa_q_deq(&mcam_mod->free_q, &qe);
3001
3002 return (struct bna_mac *)qe;
3003}
3004
3005void
3006bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
3007{
3008 list_add_tail(&mac->qe, &mcam_mod->free_q);
3009}
3010
3011/**
3012 * Note: This should be called in the same locking context as the call to
3013 * bna_rit_mod_seg_get()
3014 */
3015int
3016bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
3017{
3018 int i;
3019
3020 /* Select the pool for seg_size */
3021 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3022 if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
3023 break;
3024 }
3025
3026 if (i == BFI_RIT_SEG_TOTAL_POOLS)
3027 return 0;
3028
3029 if (list_empty(&rit_mod->rit_seg_pool[i]))
3030 return 0;
3031
3032 return 1;
3033}
3034
3035struct bna_rit_segment *
3036bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
3037{
3038 struct bna_rit_segment *seg;
3039 struct list_head *qe;
3040 int i;
3041
3042 /* Select the pool for seg_size */
3043 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3044 if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
3045 break;
3046 }
3047
3048 if (i == BFI_RIT_SEG_TOTAL_POOLS)
3049 return NULL;
3050
3051 if (list_empty(&rit_mod->rit_seg_pool[i]))
3052 return NULL;
3053
3054 bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
3055 seg = (struct bna_rit_segment *)qe;
3056 bfa_q_qe_init(&seg->qe);
3057 seg->rit_size = seg_size;
3058
3059 return seg;
3060}
3061
3062void
3063bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
3064 struct bna_rit_segment *seg)
3065{
3066 int i;
3067
3068 /* Select the pool for seg->max_rit_size */
3069 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3070 if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
3071 break;
3072 }
3073
3074 seg->rit_size = 0;
3075 list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
3076}
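/*
 * A minimal usage sketch of the RIT segment API above, assuming the caller
 * already holds whatever lock serializes access to the RIT module, so that
 * the availability check and the allocation happen in the same locking
 * context (per the note at bna_rit_mod_can_satisfy()). The function name
 * and error convention are illustrative only.
 */
static int
rxf_rit_seg_alloc_sketch(struct bna_rit_mod *rit_mod,
			 struct bna_rit_segment **seg, int seg_size)
{
	if (!bna_rit_mod_can_satisfy(rit_mod, seg_size))
		return -1;	/* no segment of this size available */

	*seg = bna_rit_mod_seg_get(rit_mod, seg_size);
	return 0;
}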
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw.h b/drivers/net/ethernet/brocade/bna/bna_hw.h
new file mode 100644
index 000000000000..16a5eed4a03b
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_hw.h
@@ -0,0 +1,1492 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/**
20 * Hardware register offsets, interrupt macros and related data structures
21 */
22
23#ifndef __BNA_HW_H__
24#define __BNA_HW_H__
25
26#include "bfi_reg.h"
27
28/**
29 *
30 * SW imposed limits
31 *
32 */
33
34#ifndef BNA_BIOS_BUILD
35
36#define BFI_MAX_TXQ 64
37#define BFI_MAX_RXQ 64
38#define BFI_MAX_RXF 64
39#define BFI_MAX_IB 128
40#define BFI_MAX_RIT_SIZE 256
41#define BFI_RSS_RIT_SIZE 64
42#define BFI_NONRSS_RIT_SIZE 1
43#define BFI_MAX_UCMAC 256
44#define BFI_MAX_MCMAC 512
45#define BFI_IBIDX_SIZE 4
46#define BFI_MAX_VLAN 4095
47
48/**
49 * There are 3 free IB index pools:
50 * pool1: 116 segments of 1 index each; pool2: 2 segments of 2 indexes each
51 * pool8: 1 segment of 8 indexes
52 */
53#define BFI_IBIDX_POOL1_SIZE 116
54#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
55#define BFI_IBIDX_POOL2_SIZE 2
56#define BFI_IBIDX_POOL2_ENTRY_SIZE 2
57#define BFI_IBIDX_POOL8_SIZE 1
58#define BFI_IBIDX_POOL8_ENTRY_SIZE 8
59#define BFI_IBIDX_TOTAL_POOLS 3
60#define BFI_IBIDX_TOTAL_SEGS 119 /* (POOL1 + POOL2 + POOL8)_SIZE */
61#define BFI_IBIDX_MAX_SEGSIZE 8
62#define init_ibidx_pool(name) \
63static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
64{ \
65 { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE }, \
66 { BFI_IBIDX_POOL2_SIZE, BFI_IBIDX_POOL2_ENTRY_SIZE }, \
67 { BFI_IBIDX_POOL8_SIZE, BFI_IBIDX_POOL8_ENTRY_SIZE } \
68}
69
70/**
71 * There are 2 free RIT segment pools:
72 * Pool1: 192 segments of 1 RIT entry each
73 * Pool2: 1 segment of 64 RIT entries
74 */
75#define BFI_RIT_SEG_POOL1_SIZE 192
76#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
77#define BFI_RIT_SEG_POOLRSS_SIZE 1
78#define BFI_RIT_SEG_POOLRSS_ENTRY_SIZE 64
79#define BFI_RIT_SEG_TOTAL_POOLS 2
80#define BFI_RIT_TOTAL_SEGS 193 /* POOL1_SIZE + POOLRSS_SIZE */
81#define init_ritseg_pool(name) \
82static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
83{ \
84 { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE }, \
85 { BFI_RIT_SEG_POOLRSS_SIZE, BFI_RIT_SEG_POOLRSS_ENTRY_SIZE } \
86}
87
88#else /* BNA_BIOS_BUILD */
89
90#define BFI_MAX_TXQ 1
91#define BFI_MAX_RXQ 1
92#define BFI_MAX_RXF 1
93#define BFI_MAX_IB 2
94#define BFI_MAX_RIT_SIZE 2
95#define BFI_RSS_RIT_SIZE 64
96#define BFI_NONRSS_RIT_SIZE 1
97#define BFI_MAX_UCMAC 1
98#define BFI_MAX_MCMAC 8
99#define BFI_IBIDX_SIZE 4
100#define BFI_MAX_VLAN 4095
101/* There is one free pool: 2 segments of 1 index each */
102#define BFI_IBIDX_POOL1_SIZE 2
103#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
104#define BFI_IBIDX_TOTAL_POOLS 1
105#define BFI_IBIDX_TOTAL_SEGS 2 /* POOL1_SIZE */
106#define BFI_IBIDX_MAX_SEGSIZE 1
107#define init_ibidx_pool(name) \
108static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
109{ \
110 { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE } \
111}
112
113#define BFI_RIT_SEG_POOL1_SIZE 1
114#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
115#define BFI_RIT_SEG_TOTAL_POOLS 1
116#define BFI_RIT_TOTAL_SEGS 1 /* POOL1_SIZE */
117#define init_ritseg_pool(name) \
118static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
119{ \
120 { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE } \
121}
122
123#endif /* BNA_BIOS_BUILD */
124
125#define BFI_RSS_HASH_KEY_LEN 10
126
127#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */
128#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */
129#define BFI_MAX_INTERPKT_COUNT 0xFF
130#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
131#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
132#define BFI_TX_INTERPKT_COUNT 32
133#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
134#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
135#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
136
137#define BFI_TXQ_WI_SIZE 64 /* bytes */
138#define BFI_RXQ_WI_SIZE 8 /* bytes */
139#define BFI_CQ_WI_SIZE 16 /* bytes */
140#define BFI_TX_MAX_WRR_QUOTA 0xFFF
141
142#define BFI_TX_MAX_VECTORS_PER_WI 4
143#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF
144#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF
145#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF
146
147/* Small Q buffer size */
148#define BFI_SMALL_RXBUF_SIZE 128
149
150/* Defined separately since BFA_FLASH_DMA_BUF_SZ is in bfa_flash.c */
151#define BFI_FLASH_DMA_BUF_SZ 0x010000 /* 64K DMA */
152#define BFI_HW_STATS_SIZE 0x4000 /* 16K DMA */
153
154/**
155 *
156 * HW register offsets, macros
157 *
158 */
159
160/* DMA Block Register Host Window Start Address */
161#define DMA_BLK_REG_ADDR 0x00013000
162
163/* DMA Block Internal Registers */
164#define DMA_CTRL_REG0 (DMA_BLK_REG_ADDR + 0x000)
165#define DMA_CTRL_REG1 (DMA_BLK_REG_ADDR + 0x004)
166#define DMA_ERR_INT_STATUS (DMA_BLK_REG_ADDR + 0x008)
167#define DMA_ERR_INT_ENABLE (DMA_BLK_REG_ADDR + 0x00c)
168#define DMA_ERR_INT_STATUS_SET (DMA_BLK_REG_ADDR + 0x010)
169
170/* APP Block Register Address Offset from BAR0 */
171#define APP_BLK_REG_ADDR 0x00014000
172
173/* Host Function Interrupt Mask Registers */
174#define HOSTFN0_INT_MASK (APP_BLK_REG_ADDR + 0x004)
175#define HOSTFN1_INT_MASK (APP_BLK_REG_ADDR + 0x104)
176#define HOSTFN2_INT_MASK (APP_BLK_REG_ADDR + 0x304)
177#define HOSTFN3_INT_MASK (APP_BLK_REG_ADDR + 0x404)
178
179/**
180 * Host Function PCIe Error Registers
181 * Duplicates "Correctable" & "Uncorrectable"
182 * registers in PCIe Config space.
183 */
184#define FN0_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x014)
185#define FN1_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x114)
186#define FN2_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x314)
187#define FN3_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x414)
188
189/* Host Function Error Type Status Registers */
190#define FN0_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x018)
191#define FN1_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x118)
192#define FN2_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x318)
193#define FN3_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x418)
194
195/* Host Function Error Type Mask Registers */
196#define FN0_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x01c)
197#define FN1_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x11c)
198#define FN2_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x31c)
199#define FN3_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x41c)
200
201/* Catapult Host Semaphore Status Registers (App block) */
202#define HOST_SEM_STS0_REG (APP_BLK_REG_ADDR + 0x630)
203#define HOST_SEM_STS1_REG (APP_BLK_REG_ADDR + 0x634)
204#define HOST_SEM_STS2_REG (APP_BLK_REG_ADDR + 0x638)
205#define HOST_SEM_STS3_REG (APP_BLK_REG_ADDR + 0x63c)
206#define HOST_SEM_STS4_REG (APP_BLK_REG_ADDR + 0x640)
207#define HOST_SEM_STS5_REG (APP_BLK_REG_ADDR + 0x644)
208#define HOST_SEM_STS6_REG (APP_BLK_REG_ADDR + 0x648)
209#define HOST_SEM_STS7_REG (APP_BLK_REG_ADDR + 0x64c)
210
211/* PCIe Misc Register */
212#define PCIE_MISC_REG (APP_BLK_REG_ADDR + 0x200)
213
214/* Temp Sensor Control Registers */
215#define TEMPSENSE_CNTL_REG (APP_BLK_REG_ADDR + 0x250)
216#define TEMPSENSE_STAT_REG (APP_BLK_REG_ADDR + 0x254)
217
218/* APP Block local error registers */
219#define APP_LOCAL_ERR_STAT (APP_BLK_REG_ADDR + 0x258)
220#define APP_LOCAL_ERR_MSK (APP_BLK_REG_ADDR + 0x25c)
221
222/* PCIe Link Error registers */
223#define PCIE_LNK_ERR_STAT (APP_BLK_REG_ADDR + 0x260)
224#define PCIE_LNK_ERR_MSK (APP_BLK_REG_ADDR + 0x264)
225
226/**
227 * FCoE/FIP Ethertype Register
228 * 31:16 -- Chip wide value for FIP type
229 * 15:0 -- Chip wide value for FCoE type
230 */
231#define FCOE_FIP_ETH_TYPE (APP_BLK_REG_ADDR + 0x280)
232
233/**
234 * Reserved Ethertype Register
235 * 31:16 -- Reserved
236 * 15:0 -- Other ethertype
237 */
238#define RESV_ETH_TYPE (APP_BLK_REG_ADDR + 0x284)
239
240/**
241 * Host Command Status Registers
242 * Each set consists of 3 registers :
243 * clear, set, cmd
244 * 16 such register sets in all
245 * See catapult_spec.pdf for detailed functionality
246 * Put each type in a single macro accessed by _num ?
247 */
248#define HOST_CMDSTS0_CLR_REG (APP_BLK_REG_ADDR + 0x500)
249#define HOST_CMDSTS0_SET_REG (APP_BLK_REG_ADDR + 0x504)
250#define HOST_CMDSTS0_REG (APP_BLK_REG_ADDR + 0x508)
251#define HOST_CMDSTS1_CLR_REG (APP_BLK_REG_ADDR + 0x510)
252#define HOST_CMDSTS1_SET_REG (APP_BLK_REG_ADDR + 0x514)
253#define HOST_CMDSTS1_REG (APP_BLK_REG_ADDR + 0x518)
254#define HOST_CMDSTS2_CLR_REG (APP_BLK_REG_ADDR + 0x520)
255#define HOST_CMDSTS2_SET_REG (APP_BLK_REG_ADDR + 0x524)
256#define HOST_CMDSTS2_REG (APP_BLK_REG_ADDR + 0x528)
257#define HOST_CMDSTS3_CLR_REG (APP_BLK_REG_ADDR + 0x530)
258#define HOST_CMDSTS3_SET_REG (APP_BLK_REG_ADDR + 0x534)
259#define HOST_CMDSTS3_REG (APP_BLK_REG_ADDR + 0x538)
260#define HOST_CMDSTS4_CLR_REG (APP_BLK_REG_ADDR + 0x540)
261#define HOST_CMDSTS4_SET_REG (APP_BLK_REG_ADDR + 0x544)
262#define HOST_CMDSTS4_REG (APP_BLK_REG_ADDR + 0x548)
263#define HOST_CMDSTS5_CLR_REG (APP_BLK_REG_ADDR + 0x550)
264#define HOST_CMDSTS5_SET_REG (APP_BLK_REG_ADDR + 0x554)
265#define HOST_CMDSTS5_REG (APP_BLK_REG_ADDR + 0x558)
266#define HOST_CMDSTS6_CLR_REG (APP_BLK_REG_ADDR + 0x560)
267#define HOST_CMDSTS6_SET_REG (APP_BLK_REG_ADDR + 0x564)
268#define HOST_CMDSTS6_REG (APP_BLK_REG_ADDR + 0x568)
269#define HOST_CMDSTS7_CLR_REG (APP_BLK_REG_ADDR + 0x570)
270#define HOST_CMDSTS7_SET_REG (APP_BLK_REG_ADDR + 0x574)
271#define HOST_CMDSTS7_REG (APP_BLK_REG_ADDR + 0x578)
272#define HOST_CMDSTS8_CLR_REG (APP_BLK_REG_ADDR + 0x580)
273#define HOST_CMDSTS8_SET_REG (APP_BLK_REG_ADDR + 0x584)
274#define HOST_CMDSTS8_REG (APP_BLK_REG_ADDR + 0x588)
275#define HOST_CMDSTS9_CLR_REG (APP_BLK_REG_ADDR + 0x590)
276#define HOST_CMDSTS9_SET_REG (APP_BLK_REG_ADDR + 0x594)
277#define HOST_CMDSTS9_REG (APP_BLK_REG_ADDR + 0x598)
278#define HOST_CMDSTS10_CLR_REG (APP_BLK_REG_ADDR + 0x5A0)
279#define HOST_CMDSTS10_SET_REG (APP_BLK_REG_ADDR + 0x5A4)
280#define HOST_CMDSTS10_REG (APP_BLK_REG_ADDR + 0x5A8)
281#define HOST_CMDSTS11_CLR_REG (APP_BLK_REG_ADDR + 0x5B0)
282#define HOST_CMDSTS11_SET_REG (APP_BLK_REG_ADDR + 0x5B4)
283#define HOST_CMDSTS11_REG (APP_BLK_REG_ADDR + 0x5B8)
284#define HOST_CMDSTS12_CLR_REG (APP_BLK_REG_ADDR + 0x5C0)
285#define HOST_CMDSTS12_SET_REG (APP_BLK_REG_ADDR + 0x5C4)
286#define HOST_CMDSTS12_REG (APP_BLK_REG_ADDR + 0x5C8)
287#define HOST_CMDSTS13_CLR_REG (APP_BLK_REG_ADDR + 0x5D0)
288#define HOST_CMDSTS13_SET_REG (APP_BLK_REG_ADDR + 0x5D4)
289#define HOST_CMDSTS13_REG (APP_BLK_REG_ADDR + 0x5D8)
290#define HOST_CMDSTS14_CLR_REG (APP_BLK_REG_ADDR + 0x5E0)
291#define HOST_CMDSTS14_SET_REG (APP_BLK_REG_ADDR + 0x5E4)
292#define HOST_CMDSTS14_REG (APP_BLK_REG_ADDR + 0x5E8)
293#define HOST_CMDSTS15_CLR_REG (APP_BLK_REG_ADDR + 0x5F0)
294#define HOST_CMDSTS15_SET_REG (APP_BLK_REG_ADDR + 0x5F4)
295#define HOST_CMDSTS15_REG (APP_BLK_REG_ADDR + 0x5F8)
296
297/**
298 * LPU0 Block Register Address Offset from BAR0
299 * Range 0x18000 - 0x18033
300 */
301#define LPU0_BLK_REG_ADDR 0x00018000
302
303/**
304 * LPU0 Registers
305 * Should they be used directly from the host,
306 * except for diagnostics?
307 * CTL_REG : Control register
308 * CMD_REG : Triggers exec. of cmd. in
309 * Mailbox memory
310 */
311#define LPU0_MBOX_CTL_REG (LPU0_BLK_REG_ADDR + 0x000)
312#define LPU0_MBOX_CMD_REG (LPU0_BLK_REG_ADDR + 0x004)
313#define LPU0_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x008)
314#define LPU1_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x00c)
315#define LPU0_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x010)
316#define LPU1_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x014)
317#define LPU0_ERR_STATUS_REG (LPU0_BLK_REG_ADDR + 0x018)
318#define LPU0_ERR_SET_REG (LPU0_BLK_REG_ADDR + 0x020)
319
320/**
321 * LPU1 Block Register Address Offset from BAR0
322 * Range 0x18400 - 0x18433
323 */
324#define LPU1_BLK_REG_ADDR 0x00018400
325
326/**
327 * LPU1 Registers
328 * Same as LPU0 registers above
329 */
330#define LPU1_MBOX_CTL_REG (LPU1_BLK_REG_ADDR + 0x000)
331#define LPU1_MBOX_CMD_REG (LPU1_BLK_REG_ADDR + 0x004)
332#define LPU0_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x008)
333#define LPU1_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x00c)
334#define LPU0_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x010)
335#define LPU1_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x014)
336#define LPU1_ERR_STATUS_REG (LPU1_BLK_REG_ADDR + 0x018)
337#define LPU1_ERR_SET_REG (LPU1_BLK_REG_ADDR + 0x020)
338
339/**
340 * PSS Block Register Address Offset from BAR0
341 * Range 0x18800 - 0x188DB
342 */
343#define PSS_BLK_REG_ADDR 0x00018800
344
345/**
346 * PSS Registers
347 * For details, see catapult_spec.pdf
348 * ERR_STATUS_REG : Indicates error in PSS module
349 * RAM_ERR_STATUS_REG : Indicates RAM module that detected error
350 */
351#define ERR_STATUS_SET (PSS_BLK_REG_ADDR + 0x018)
352#define PSS_RAM_ERR_STATUS_REG (PSS_BLK_REG_ADDR + 0x01C)
353
354/**
355 * PSS Semaphore Lock Registers, total 16
356 * The first read while unlocked returns 0,
357 * and atomically sets the lock to 1.
358 * Subsequent reads return 1.
359 * To clear, set the value to 0.
360 * Range : 0x20 to 0x5c
361 */
362#define PSS_SEM_LOCK_REG(_num) \
363 (PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
364
365/**
366 * PSS Semaphore Status Registers,
367 * corresponding to the lock registers above
368 */
369#define PSS_SEM_STATUS_REG(_num) \
370 (PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
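/*
 * A minimal sketch of the lock semantics described above, assuming only a
 * mapped BAR0 kernel virtual address; the helper names are illustrative.
 * A read of 0 means the semaphore was acquired; writing 0 releases it.
 */
static inline int
pss_sem_try_lock_sketch(void __iomem *bar0_kva, int sem_num)
{
	return readl(bar0_kva + PSS_SEM_LOCK_REG(sem_num)) == 0;
}

static inline void
pss_sem_unlock_sketch(void __iomem *bar0_kva, int sem_num)
{
	writel(0, bar0_kva + PSS_SEM_LOCK_REG(sem_num));
}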
371
372/**
373 * Catapult CPQ Registers
374 * Defines for Mailbox Registers
375 * Used to send mailbox commands to firmware from
376 * host. The data part is written to the MBox
377 * memory; registers are used to indicate that
378 * a command is resident in memory.
379 *
380 * Note : LPU0<->LPU1 mailboxes are not listed here
381 */
382#define CPQ_BLK_REG_ADDR 0x00019000
383
384#define HOSTFN0_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x130)
385#define HOSTFN0_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x134)
386#define LPU0_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x138)
387#define LPU1_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x13C)
388
389#define HOSTFN1_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x140)
390#define HOSTFN1_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x144)
391#define LPU0_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x148)
392#define LPU1_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x14C)
393
394#define HOSTFN2_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x170)
395#define HOSTFN2_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x174)
396#define LPU0_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x178)
397#define LPU1_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x17C)
398
399#define HOSTFN3_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x180)
400#define HOSTFN3_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x184)
401#define LPU0_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x188)
402#define LPU1_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x18C)
403
404/* Host Function Force Parity Error Registers */
405#define HOSTFN0_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x120)
406#define HOSTFN1_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x124)
407#define HOSTFN2_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x128)
408#define HOSTFN3_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x12C)
409
410/* LL Port[0|1] Halt Mask Registers */
411#define LL_HALT_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1A0)
412#define LL_HALT_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1B0)
413
414/* LL Port[0|1] Error Mask Registers */
415#define LL_ERR_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1D0)
416#define LL_ERR_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1D4)
417
418/* EMC FLI (Flash Controller) Block Register Address Offset from BAR0 */
419#define FLI_BLK_REG_ADDR 0x0001D000
420
421/* EMC FLI Registers */
422#define FLI_CMD_REG (FLI_BLK_REG_ADDR + 0x000)
423#define FLI_ADDR_REG (FLI_BLK_REG_ADDR + 0x004)
424#define FLI_CTL_REG (FLI_BLK_REG_ADDR + 0x008)
425#define FLI_WRDATA_REG (FLI_BLK_REG_ADDR + 0x00C)
426#define FLI_RDDATA_REG (FLI_BLK_REG_ADDR + 0x010)
427#define FLI_DEV_STATUS_REG (FLI_BLK_REG_ADDR + 0x014)
428#define FLI_SIG_WD_REG (FLI_BLK_REG_ADDR + 0x018)
429
430/**
431 * RO register
432 * 31:16 -- Vendor Id
433 * 15:0 -- Device Id
434 */
435#define FLI_DEV_VENDOR_REG (FLI_BLK_REG_ADDR + 0x01C)
436#define FLI_ERR_STATUS_REG (FLI_BLK_REG_ADDR + 0x020)
437
438/**
439 * RAD (RxAdm) Block Register Address Offset from BAR0
440 * RAD0 Range : 0x20000 - 0x203FF
441 * RAD1 Range : 0x20400 - 0x207FF
442 */
443#define RAD0_BLK_REG_ADDR 0x00020000
444#define RAD1_BLK_REG_ADDR 0x00020400
445
446/* RAD0 Registers */
447#define RAD0_CTL_REG (RAD0_BLK_REG_ADDR + 0x000)
448#define RAD0_PE_PARM_REG (RAD0_BLK_REG_ADDR + 0x004)
449#define RAD0_BCN_REG (RAD0_BLK_REG_ADDR + 0x008)
450
451/* Default function ID register */
452#define RAD0_DEFAULT_REG (RAD0_BLK_REG_ADDR + 0x00C)
453
454/* Default promiscuous ID register */
455#define RAD0_PROMISC_REG (RAD0_BLK_REG_ADDR + 0x010)
456
457#define RAD0_BCNQ_REG (RAD0_BLK_REG_ADDR + 0x014)
458
459/*
460 * This register selects 1 of 8 PM Q's using
461 * VLAN pri, for non-BCN packets without a VLAN tag
462 */
463#define RAD0_DEFAULTQ_REG (RAD0_BLK_REG_ADDR + 0x018)
464
465#define RAD0_ERR_STS (RAD0_BLK_REG_ADDR + 0x01C)
466#define RAD0_SET_ERR_STS (RAD0_BLK_REG_ADDR + 0x020)
467#define RAD0_ERR_INT_EN (RAD0_BLK_REG_ADDR + 0x024)
468#define RAD0_FIRST_ERR (RAD0_BLK_REG_ADDR + 0x028)
469#define RAD0_FORCE_ERR (RAD0_BLK_REG_ADDR + 0x02C)
470
471#define RAD0_IF_RCVD (RAD0_BLK_REG_ADDR + 0x030)
472#define RAD0_IF_RCVD_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x034)
473#define RAD0_IF_RCVD_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x038)
474#define RAD0_IF_RCVD_VLAN (RAD0_BLK_REG_ADDR + 0x03C)
475#define RAD0_IF_RCVD_UCAST (RAD0_BLK_REG_ADDR + 0x040)
476#define RAD0_IF_RCVD_UCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x044)
477#define RAD0_IF_RCVD_UCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x048)
478#define RAD0_IF_RCVD_UCAST_VLAN (RAD0_BLK_REG_ADDR + 0x04C)
479#define RAD0_IF_RCVD_MCAST (RAD0_BLK_REG_ADDR + 0x050)
480#define RAD0_IF_RCVD_MCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x054)
481#define RAD0_IF_RCVD_MCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x058)
482#define RAD0_IF_RCVD_MCAST_VLAN (RAD0_BLK_REG_ADDR + 0x05C)
483#define RAD0_IF_RCVD_BCAST (RAD0_BLK_REG_ADDR + 0x060)
484#define RAD0_IF_RCVD_BCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x064)
485#define RAD0_IF_RCVD_BCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x068)
486#define RAD0_IF_RCVD_BCAST_VLAN (RAD0_BLK_REG_ADDR + 0x06C)
487#define RAD0_DROPPED_FRAMES (RAD0_BLK_REG_ADDR + 0x070)
488
489#define RAD0_MAC_MAN_1H (RAD0_BLK_REG_ADDR + 0x080)
490#define RAD0_MAC_MAN_1L (RAD0_BLK_REG_ADDR + 0x084)
491#define RAD0_MAC_MAN_2H (RAD0_BLK_REG_ADDR + 0x088)
492#define RAD0_MAC_MAN_2L (RAD0_BLK_REG_ADDR + 0x08C)
493#define RAD0_MAC_MAN_3H (RAD0_BLK_REG_ADDR + 0x090)
494#define RAD0_MAC_MAN_3L (RAD0_BLK_REG_ADDR + 0x094)
495#define RAD0_MAC_MAN_4H (RAD0_BLK_REG_ADDR + 0x098)
496#define RAD0_MAC_MAN_4L (RAD0_BLK_REG_ADDR + 0x09C)
497
498#define RAD0_LAST4_IP (RAD0_BLK_REG_ADDR + 0x100)
499
500/* RAD1 Registers */
501#define RAD1_CTL_REG (RAD1_BLK_REG_ADDR + 0x000)
502#define RAD1_PE_PARM_REG (RAD1_BLK_REG_ADDR + 0x004)
503#define RAD1_BCN_REG (RAD1_BLK_REG_ADDR + 0x008)
504
505/* Default function ID register */
506#define RAD1_DEFAULT_REG (RAD1_BLK_REG_ADDR + 0x00C)
507
508/* Promiscuous function ID register */
509#define RAD1_PROMISC_REG (RAD1_BLK_REG_ADDR + 0x010)
510
511#define RAD1_BCNQ_REG (RAD1_BLK_REG_ADDR + 0x014)
512
513/*
514 * This register selects 1 of 8 PM Q's using
515 * VLAN pri, for non-BCN packets without a VLAN tag
516 */
517#define RAD1_DEFAULTQ_REG (RAD1_BLK_REG_ADDR + 0x018)
518
519#define RAD1_ERR_STS (RAD1_BLK_REG_ADDR + 0x01C)
520#define RAD1_SET_ERR_STS (RAD1_BLK_REG_ADDR + 0x020)
521#define RAD1_ERR_INT_EN (RAD1_BLK_REG_ADDR + 0x024)
522
523/**
524 * TXA Block Register Address Offset from BAR0
525 * TXA0 Range : 0x21000 - 0x213FF
526 * TXA1 Range : 0x21400 - 0x217FF
527 */
528#define TXA0_BLK_REG_ADDR 0x00021000
529#define TXA1_BLK_REG_ADDR 0x00021400
530
531/* TXA Registers */
532#define TXA0_CTRL_REG (TXA0_BLK_REG_ADDR + 0x000)
533#define TXA1_CTRL_REG (TXA1_BLK_REG_ADDR + 0x000)
534
535/**
536 * TSO Sequence # Registers (RO)
537 * Total 8 (for 8 queues)
538 * Holds the last seq.# for TSO frames
539 * See catapult_spec.pdf for more details
540 */
541#define TXA0_TSO_TCP_SEQ_REG(_num) \
542 (TXA0_BLK_REG_ADDR + 0x020 + ((_num) << 2))
543
544#define TXA1_TSO_TCP_SEQ_REG(_num) \
545 (TXA1_BLK_REG_ADDR + 0x020 + ((_num) << 2))
546
547/**
548 * TSO IP ID # Registers (RO)
549 * Total 8 (for 8 queues)
550 * Holds the last IP ID for TSO frames
551 * See catapult_spec.pdf for more details
552 */
553#define TXA0_TSO_IP_INFO_REG(_num) \
554 (TXA0_BLK_REG_ADDR + 0x040 + ((_num) << 2))
555
556#define TXA1_TSO_IP_INFO_REG(_num) \
557 (TXA1_BLK_REG_ADDR + 0x040 + ((_num) << 2))
558
559/**
560 * RXA Block Register Address Offset from BAR0
561 * RXA0 Range : 0x21800 - 0x21BFF
562 * RXA1 Range : 0x21C00 - 0x21FFF
563 */
564#define RXA0_BLK_REG_ADDR 0x00021800
565#define RXA1_BLK_REG_ADDR 0x00021C00
566
567/* RXA Registers */
568#define RXA0_CTL_REG (RXA0_BLK_REG_ADDR + 0x040)
569#define RXA1_CTL_REG (RXA1_BLK_REG_ADDR + 0x040)
570
571/**
572 * PPLB Block Register Address Offset from BAR0
573 * PPLB0 Range : 0x22000 - 0x223FF
574 * PPLB1 Range : 0x22400 - 0x227FF
575 */
576#define PLB0_BLK_REG_ADDR 0x00022000
577#define PLB1_BLK_REG_ADDR 0x00022400
578
579/**
580 * PLB Registers
581 * Holds the RL timer time stamps used in RLT tagged frames
582 */
583#define PLB0_ECM_TIMER_REG (PLB0_BLK_REG_ADDR + 0x05C)
584#define PLB1_ECM_TIMER_REG (PLB1_BLK_REG_ADDR + 0x05C)
585
586/* Controls the rate-limiter on each of the priority class */
587#define PLB0_RL_CTL (PLB0_BLK_REG_ADDR + 0x060)
588#define PLB1_RL_CTL (PLB1_BLK_REG_ADDR + 0x060)
589
590/**
591 * Max byte register, total 8, 0-7
592 * see catapult_spec.pdf for details
593 */
594#define PLB0_RL_MAX_BC(_num) \
595 (PLB0_BLK_REG_ADDR + 0x064 + ((_num) << 2))
596#define PLB1_RL_MAX_BC(_num) \
597 (PLB1_BLK_REG_ADDR + 0x064 + ((_num) << 2))
598
599/**
600 * RL Time Unit Register for priority 0-7
601 * 4 bits per priority
602 * (2^rl_unit)*1us is the actual time period
603 */
604#define PLB0_RL_TU_PRIO (PLB0_BLK_REG_ADDR + 0x084)
605#define PLB1_RL_TU_PRIO (PLB1_BLK_REG_ADDR + 0x084)
606
607/**
608 * RL byte count register,
609 * bytes transmitted in (rl_unit*1)us time period
610 * 1 per priority, 8 in all, 0-7.
611 */
612#define PLB0_RL_BYTE_CNT(_num) \
613 (PLB0_BLK_REG_ADDR + 0x088 + ((_num) << 2))
614#define PLB1_RL_BYTE_CNT(_num) \
615 (PLB1_BLK_REG_ADDR + 0x088 + ((_num) << 2))
616
617/**
618 * RL Min factor register
619 * 2 bits per priority,
620 * 4 factors possible: 1, 0.5, 0.25, 0
621 * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
622 */
623#define PLB0_RL_MIN_REG (PLB0_BLK_REG_ADDR + 0x0A8)
624#define PLB1_RL_MIN_REG (PLB1_BLK_REG_ADDR + 0x0A8)
625
626/**
627 * RL Max factor register
628 * 2 bits per priority,
629 * 4 factors possible: 1, 0.5, 0.25, 0
630 * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
631 */
632#define PLB0_RL_MAX_REG (PLB0_BLK_REG_ADDR + 0x0AC)
633#define PLB1_RL_MAX_REG (PLB1_BLK_REG_ADDR + 0x0AC)
634
635/* MAC SERDES Address Paging register */
636#define PLB0_EMS_ADD_REG (PLB0_BLK_REG_ADDR + 0xD0)
637#define PLB1_EMS_ADD_REG (PLB1_BLK_REG_ADDR + 0xD0)
638
639/* LL EMS Registers */
640#define LL_EMS0_BLK_REG_ADDR 0x00026800
641#define LL_EMS1_BLK_REG_ADDR 0x00026C00
642
643/**
644 * BPC Block Register Address Offset from BAR0
645 * BPC0 Range : 0x23000 - 0x233FF
646 * BPC1 Range : 0x23400 - 0x237FF
647 */
648#define BPC0_BLK_REG_ADDR 0x00023000
649#define BPC1_BLK_REG_ADDR 0x00023400
650
651/**
652 * PMM Block Register Address Offset from BAR0
653 * PMM0 Range : 0x23800 - 0x23BFF
654 * PMM1 Range : 0x23C00 - 0x23FFF
655 */
656#define PMM0_BLK_REG_ADDR 0x00023800
657#define PMM1_BLK_REG_ADDR 0x00023C00
658
659/**
660 * HQM Block Register Address Offset from BAR0
661 * HQM0 Range : 0x24000 - 0x243FF
662 * HQM1 Range : 0x24400 - 0x247FF
663 */
664#define HQM0_BLK_REG_ADDR 0x00024000
665#define HQM1_BLK_REG_ADDR 0x00024400
666
667/**
668 * HQM Control Register
669 * Controls some aspects of IB
670 * See catapult_spec.pdf for details
671 */
672#define HQM0_CTL_REG (HQM0_BLK_REG_ADDR + 0x000)
673#define HQM1_CTL_REG (HQM1_BLK_REG_ADDR + 0x000)
674
675/**
676 * HQM Stop Q Semaphore Registers.
677 * Only one Queue resource can be stopped at
678 * any given time. This register controls access
679 * to the single stop Q resource.
680 * See catapult_spec.pdf for details
681 */
682#define HQM0_RXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x028)
683#define HQM0_TXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x02C)
684#define HQM1_RXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x028)
685#define HQM1_TXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x02C)
686
687/**
688 * LUT Block Register Address Offset from BAR0
689 * LUT0 Range : 0x25800 - 0x25BFF
690 * LUT1 Range : 0x25C00 - 0x25FFF
691 */
692#define LUT0_BLK_REG_ADDR 0x00025800
693#define LUT1_BLK_REG_ADDR 0x00025C00
694
695/**
696 * LUT Registers
697 * See catapult_spec.pdf for details
698 */
699#define LUT0_ERR_STS (LUT0_BLK_REG_ADDR + 0x000)
700#define LUT1_ERR_STS (LUT1_BLK_REG_ADDR + 0x000)
701#define LUT0_SET_ERR_STS (LUT0_BLK_REG_ADDR + 0x004)
702#define LUT1_SET_ERR_STS (LUT1_BLK_REG_ADDR + 0x004)
703
704/**
705 * TRC (Debug/Trace) Register Offset from BAR0
706 * Range : 0x26000 -- 0x263FFF
707 */
708#define TRC_BLK_REG_ADDR 0x00026000
709
710/**
711 * TRC Registers
712 * See catapult_spec.pdf for details of each
713 */
714#define TRC_CTL_REG (TRC_BLK_REG_ADDR + 0x000)
715#define TRC_MODS_REG (TRC_BLK_REG_ADDR + 0x004)
716#define TRC_TRGC_REG (TRC_BLK_REG_ADDR + 0x008)
717#define TRC_CNT1_REG (TRC_BLK_REG_ADDR + 0x010)
718#define TRC_CNT2_REG (TRC_BLK_REG_ADDR + 0x014)
719#define TRC_NXTS_REG (TRC_BLK_REG_ADDR + 0x018)
720#define TRC_DIRR_REG (TRC_BLK_REG_ADDR + 0x01C)
721
722/**
723 * TRC Trigger match filters, total 10
724 * Determines the trigger condition
725 */
726#define TRC_TRGM_REG(_num) \
727 (TRC_BLK_REG_ADDR + 0x040 + ((_num) << 2))
728
729/**
730 * TRC Next State filters, total 10
731 * Determines the next state conditions
732 */
733#define TRC_NXTM_REG(_num) \
734 (TRC_BLK_REG_ADDR + 0x080 + ((_num) << 2))
735
736/**
737 * TRC Store Match filters, total 10
738 * Determines the store conditions
739 */
740#define TRC_STRM_REG(_num) \
741 (TRC_BLK_REG_ADDR + 0x0C0 + ((_num) << 2))
742
743/* DOORBELLS ACCESS */
744
745/**
746 * Catapult doorbells
747 * Each doorbell-queue set has
748 * 1 RxQ, 1 TxQ, 2 IBs in that order
749 * Size of each entry is 32 bytes, even though only 1 word
750 * is used. For Non-VM case each doorbell-q set is
751 * separated by 128 bytes, for VM case it is separated
752 * by 4K bytes
753 * Non VM case Range : 0x38000 - 0x39FFF
754 * VM case Range : 0x100000 - 0x11FFFF
755 * The range applies to both HQMs
756 */
757#define HQM_DOORBELL_BLK_BASE_ADDR 0x00038000
758#define HQM_DOORBELL_VM_BLK_BASE_ADDR 0x00100000
759
760/* MEMORY ACCESS */
761
762/**
763 * Catapult H/W Block Memory Access Address
764 * To the host a memory space of 32K (page) is visible
765 * at a time. The address range is from 0x08000 to 0x0FFFF
766 */
767#define HW_BLK_HOST_MEM_ADDR 0x08000
768
769/**
770 * Catapult LUT Memory Access Page Numbers
771 * Range : LUT0 0xa0-0xa1
772 * LUT1 0xa2-0xa3
773 */
774#define LUT0_MEM_BLK_BASE_PG_NUM 0x000000A0
775#define LUT1_MEM_BLK_BASE_PG_NUM 0x000000A2
776
777/**
778 * Catapult RxFn Database Memory Block Base Offset
779 *
780 * The Rx function database exists in LUT block.
781 * In PCIe space this is accessible as a 256x32
782 * bit block. Each entry in this database is 4
783 * (4 byte) words. Max. entries is 64.
784 * Address of an entry corresponding to a function
785 * = base_addr + (function_no. * 16)
786 */
787#define RX_FNDB_RAM_BASE_OFFSET 0x0000B400
788
789/**
790 * Catapult TxFn Database Memory Block Base Offset Address
791 *
792 * The Tx function database exists in LUT block.
793 * In PCIe space this is accessible as a 64x32
794 * bit block. Each entry in this database is 1
795 * (4 byte) word. Max. entries is 64.
796 * Address of an entry corresponding to a function
797 * = base_addr + (function_no. * 4)
798 */
799#define TX_FNDB_RAM_BASE_OFFSET 0x0000B800
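/*
 * The per-function entry offsets follow directly from the formulas in the
 * two comments above (16 bytes per Rx entry, 4 bytes per Tx entry). The
 * macro names below are illustrative, not part of the hardware definition:
 */
#define RX_FNDB_ENTRY_OFFSET(_fn)	(RX_FNDB_RAM_BASE_OFFSET + ((_fn) * 16))
#define TX_FNDB_ENTRY_OFFSET(_fn)	(TX_FNDB_RAM_BASE_OFFSET + ((_fn) * 4))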
800
801/**
802 * Catapult Unicast CAM Base Offset Address
803 *
804 * Exists in LUT memory space.
805 * Shared by both the LL & FCoE driver.
806 * Size is 256x48 bits; mapped to PCIe space
807 * 512x32 bit blocks. For each address, bits
808 * are written in the order : [47:32] and then
809 * [31:0].
810 */
811#define UCAST_CAM_BASE_OFFSET 0x0000A800
812
813/**
814 * Catapult Unicast RAM Base Offset Address
815 *
816 * Exists in LUT memory space.
817 * Shared by both the LL & FCoE driver.
818 * Size is 256x9 bits.
819 */
820#define UCAST_RAM_BASE_OFFSET 0x0000B000
821
822/**
823 * Catapult Multicast CAM Base Offset Address
824 *
825 * Exists in LUT memory space.
826 * Shared by both the LL & FCoE driver.
827 * Size is 256x48 bits; mapped to PCIe space
828 * 512x32 bit blocks. For each address, bits
829 * are written in the order : [47:32] and then
830 * [31:0].
831 */
832#define MCAST_CAM_BASE_OFFSET 0x0000A000
833
834/**
835 * Catapult VLAN RAM Base Offset Address
836 *
837 * Exists in LUT memory space.
838 * Size is 4096x66 bits; mapped to PCIe space as
839 * 8192x32 bit blocks.
840 * All the 4K entries are within the address range
841 * 0x0000 to 0x8000, so in the first LUT page.
842 */
843#define VLAN_RAM_BASE_OFFSET 0x00000000
844
845/**
846 * Catapult Tx Stats RAM Base Offset Address
847 *
848 * Exists in LUT memory space.
849 * Size is 1024x33 bits;
850 * Each Tx function has 64 bytes of space
851 */
852#define TX_STATS_RAM_BASE_OFFSET 0x00009000
853
854/**
855 * Catapult Rx Stats RAM Base Offset Address
856 *
857 * Exists in LUT memory space.
858 * Size is 1024x33 bits;
859 * Each Rx function has 64 bytes of space
860 */
861#define RX_STATS_RAM_BASE_OFFSET 0x00008000
862
863/* Catapult RXA Memory Access Page Numbers */
864#define RXA0_MEM_BLK_BASE_PG_NUM 0x0000008C
865#define RXA1_MEM_BLK_BASE_PG_NUM 0x0000008D
866
867/**
868 * Catapult Multicast Vector Table Base Offset Address
869 *
870 * Exists in RxA memory space.
871 * Organized as a 512x65 bit block.
872 * However, 16 bytes (a power of 2) are allocated for each entry.
873 * Total size 512*16 bytes.
874 * There are two logical divisions, 256 entries each :
875 * a) Entries 0x00 to 0xff (256) -- Approx. MVT
876 * Offset 0x000 to 0xFFF
877 * b) Entries 0x100 to 0x1ff (256) -- Exact MVT
878 * Offsets 0x1000 to 0x1FFF
879 */
880#define MCAST_APPROX_MVT_BASE_OFFSET 0x00000000
881#define MCAST_EXACT_MVT_BASE_OFFSET 0x00001000
882
883/**
884 * Catapult RxQ Translate Table (RIT) Base Offset Address
885 *
886 * Exists in RxA memory space
887 * Total no. of entries 64
888 * Each entry is 1 (4 byte) word.
889 * 31:12 -- Reserved
890 * 11:0 -- Two 6 bit RxQ Ids
891 */
892#define FUNCTION_TO_RXQ_TRANSLATE 0x00002000
893
894/* Catapult RxAdm (RAD) Memory Access Page Numbers */
895#define RAD0_MEM_BLK_BASE_PG_NUM 0x00000086
896#define RAD1_MEM_BLK_BASE_PG_NUM 0x00000087
897
898/**
899 * Catapult RSS Table Base Offset Address
900 *
901 * Exists in RAD memory space.
902 * Each entry is 352 bits, but aligned on
903 * 64 byte (512 bit) boundary. Accessed as
904 * 4 byte words, the whole entry can be
905 * broken into 11 word accesses.
906 */
907#define RSS_TABLE_BASE_OFFSET 0x00000800
908
909/**
910 * Catapult CPQ Block Page Number
911 * This value is written to the page number registers
912 * to access the memory associated with the mailboxes.
913 */
914#define CPQ_BLK_PG_NUM 0x00000005
915
916/**
917 * Clarification :
918 * LL functions are 2 & 3; can HostFn0/HostFn1
919 * <-> LPU0/LPU1 memories be used ?
920 */
921/**
922 * Catapult HostFn0/HostFn1 to LPU0/LPU1 Mbox memory
923 * Per catapult_spec.pdf, the offset of the mbox
924 * memory is in the register space at an offset of 0x200
925 */
926#define CPQ_BLK_REG_MBOX_ADDR (CPQ_BLK_REG_ADDR + 0x200)
927
928#define HOSTFN_LPU_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x000)
929
930/* Catapult LPU0/LPU1 to HostFn0/HostFn1 Mbox memory */
931#define LPU_HOSTFN_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x080)
932
933/**
934 * Catapult HQM Block Page Number
935 * This is written to the page number register for
936 * the appropriate function to access the memory
937 * associated with HQM
938 */
939#define HQM0_BLK_PG_NUM 0x00000096
940#define HQM1_BLK_PG_NUM 0x00000097
941
942/**
943 * Note that TxQ and RxQ entries are interleaved
944 * in the HQM memory, i.e. RXQ0, TXQ0, RXQ1, TXQ1, etc.
945 */
946
947#define HQM_RXTX_Q_RAM_BASE_OFFSET 0x00004000
948
949/**
950 * CQ Memory
951 * Exists in HQM Memory space
952 * Each entry is 16 (4 byte) words of which
953 * only 12 words are used for configuration
954 * Total 64 entries per HQM memory space
955 */
956#define HQM_CQ_RAM_BASE_OFFSET 0x00006000
957
958/**
959 * Interrupt Block (IB) Memory
960 * Exists in HQM Memory space
961 * Each entry is 8 (4 byte) words of which
962 * only 5 words are used for configuration
963 * Total 128 entries per HQM memory space
964 */
965#define HQM_IB_RAM_BASE_OFFSET 0x00001000
966
967/**
968 * Index Table (IT) Memory
969 * Exists in HQM Memory space
970 * Each entry is 1 (4 byte) word which
971 * is used for configuration
972 * Total 128 entries per HQM memory space
973 */
974#define HQM_INDX_TBL_RAM_BASE_OFFSET 0x00002000
975
976/**
977 * PSS Block Memory Page Number
978 * This is written to the appropriate page number
979 * register to access the CPU memory.
980 * Also known as the PSS secondary memory (SMEM).
981 * Range : 0x180 to 0x1CF
982 * See catapult_spec.pdf for details
983 */
984#define PSS_BLK_PG_NUM 0x00000180
985
986/**
987 * Offsets of different instances of PSS SMEM
988 * 2.5M of contiguous 1T memory space : 2 blocks
989 * of 1M each (32 pages each, page=32KB) and 4 smaller
990 * blocks of 128K each (4 pages each, page=32KB)
991 * PSS_LMEM_INST0 is used for firmware download
992 */
993#define PSS_LMEM_INST0 0x00000000
994#define PSS_LMEM_INST1 0x00100000
995#define PSS_LMEM_INST2 0x00200000
996#define PSS_LMEM_INST3 0x00220000
997#define PSS_LMEM_INST4 0x00240000
998#define PSS_LMEM_INST5 0x00260000
999
1000#define BNA_PCI_REG_CT_ADDRSZ (0x40000)
1001
1002#define BNA_GET_PAGE_NUM(_base_page, _offset) \
1003 ((_base_page) + ((_offset) >> 15))
1004
1005#define BNA_GET_PAGE_OFFSET(_offset) \
1006 ((_offset) & 0x7fff)
1007
1008#define BNA_GET_MEM_BASE_ADDR(_bar0, _base_offset) \
1009 ((_bar0) + HW_BLK_HOST_MEM_ADDR \
1010 + BNA_GET_PAGE_OFFSET((_base_offset)))
1011
1012#define BNA_GET_VLAN_MEM_ENTRY_ADDR(_bar0, _fn_id, _vlan_id)\
1013 (_bar0 + (HW_BLK_HOST_MEM_ADDR) \
1014 + (BNA_GET_PAGE_OFFSET(VLAN_RAM_BASE_OFFSET)) \
1015 + (((_fn_id) & 0x3f) << 9) \
1016 + (((_vlan_id) & 0xfe0) >> 3))
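/*
 * A minimal sketch of how the paging macros above work together, assuming
 * bna->regs.page_addr and bna->pcidev.pci_bar_kva have already been set up
 * by the initialization code in bna_ctrl.c; the function name is
 * illustrative. The same pattern is used by bna_ib_start() in bna_txrx.c
 * for the HQM IB memory.
 */
static u32
bna_paged_mem_readl_sketch(struct bna *bna, u32 base_pg_num, u32 mem_offset)
{
	void __iomem *base_addr;

	/* Select the 32KB page containing mem_offset */
	writel(BNA_GET_PAGE_NUM(base_pg_num, mem_offset),
	       bna->regs.page_addr);

	/* Access the word through the host-visible window at BAR0 + 0x8000 */
	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva, mem_offset);
	return readl(base_addr);
}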
1017
1018/**
1019 *
1020 * Interrupt related bits, flags and macros
1021 *
1022 */
1023
1024#define __LPU02HOST_MBOX0_STATUS_BITS 0x00100000
1025#define __LPU12HOST_MBOX0_STATUS_BITS 0x00200000
1026#define __LPU02HOST_MBOX1_STATUS_BITS 0x00400000
1027#define __LPU12HOST_MBOX1_STATUS_BITS 0x00800000
1028
1029#define __LPU02HOST_MBOX0_MASK_BITS 0x00100000
1030#define __LPU12HOST_MBOX0_MASK_BITS 0x00200000
1031#define __LPU02HOST_MBOX1_MASK_BITS 0x00400000
1032#define __LPU12HOST_MBOX1_MASK_BITS 0x00800000
1033
1034#define __LPU2HOST_MBOX_MASK_BITS \
1035 (__LPU02HOST_MBOX0_MASK_BITS | __LPU02HOST_MBOX1_MASK_BITS | \
1036 __LPU12HOST_MBOX0_MASK_BITS | __LPU12HOST_MBOX1_MASK_BITS)
1037
1038#define __LPU2HOST_IB_STATUS_BITS 0x0000ffff
1039
1040#define BNA_IS_LPU0_MBOX_INTR(_intr_status) \
1041 ((_intr_status) & (__LPU02HOST_MBOX0_STATUS_BITS | \
1042 __LPU02HOST_MBOX1_STATUS_BITS))
1043
1044#define BNA_IS_LPU1_MBOX_INTR(_intr_status) \
1045 ((_intr_status) & (__LPU12HOST_MBOX0_STATUS_BITS | \
1046 __LPU12HOST_MBOX1_STATUS_BITS))
1047
1048#define BNA_IS_MBOX_INTR(_intr_status) \
1049 ((_intr_status) & \
1050 (__LPU02HOST_MBOX0_STATUS_BITS | \
1051 __LPU02HOST_MBOX1_STATUS_BITS | \
1052 __LPU12HOST_MBOX0_STATUS_BITS | \
1053 __LPU12HOST_MBOX1_STATUS_BITS))
1054
1055#define __EMC_ERROR_STATUS_BITS 0x00010000
1056#define __LPU0_ERROR_STATUS_BITS 0x00020000
1057#define __LPU1_ERROR_STATUS_BITS 0x00040000
1058#define __PSS_ERROR_STATUS_BITS 0x00080000
1059
1060#define __HALT_STATUS_BITS 0x01000000
1061
1062#define __EMC_ERROR_MASK_BITS 0x00010000
1063#define __LPU0_ERROR_MASK_BITS 0x00020000
1064#define __LPU1_ERROR_MASK_BITS 0x00040000
1065#define __PSS_ERROR_MASK_BITS 0x00080000
1066
1067#define __HALT_MASK_BITS 0x01000000
1068
1069#define __ERROR_MASK_BITS \
1070 (__EMC_ERROR_MASK_BITS | __LPU0_ERROR_MASK_BITS | \
1071 __LPU1_ERROR_MASK_BITS | __PSS_ERROR_MASK_BITS | \
1072 __HALT_MASK_BITS)
1073
1074#define BNA_IS_ERR_INTR(_intr_status) \
1075 ((_intr_status) & \
1076 (__EMC_ERROR_STATUS_BITS | \
1077 __LPU0_ERROR_STATUS_BITS | \
1078 __LPU1_ERROR_STATUS_BITS | \
1079 __PSS_ERROR_STATUS_BITS | \
1080 __HALT_STATUS_BITS))
1081
1082#define BNA_IS_MBOX_ERR_INTR(_intr_status) \
1083 (BNA_IS_MBOX_INTR((_intr_status)) | \
1084 BNA_IS_ERR_INTR((_intr_status)))
1085
1086#define BNA_IS_INTX_DATA_INTR(_intr_status) \
1087 ((_intr_status) & __LPU2HOST_IB_STATUS_BITS)
1088
1089#define BNA_INTR_STATUS_MBOX_CLR(_intr_status) \
1090do { \
1091 (_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS | \
1092 __LPU02HOST_MBOX1_STATUS_BITS | \
1093 __LPU12HOST_MBOX0_STATUS_BITS | \
1094 __LPU12HOST_MBOX1_STATUS_BITS); \
1095} while (0)
1096
1097#define BNA_INTR_STATUS_ERR_CLR(_intr_status) \
1098do { \
1099 (_intr_status) &= ~(__EMC_ERROR_STATUS_BITS | \
1100 __LPU0_ERROR_STATUS_BITS | \
1101 __LPU1_ERROR_STATUS_BITS | \
1102 __PSS_ERROR_STATUS_BITS | \
1103 __HALT_STATUS_BITS); \
1104} while (0)
1105
1106#define bna_intx_disable(_bna, _cur_mask) \
1107{ \
1108 (_cur_mask) = readl((_bna)->regs.fn_int_mask);\
1109 writel(0xffffffff, (_bna)->regs.fn_int_mask);\
1110}
1111
1112#define bna_intx_enable(bna, new_mask) \
1113 writel((new_mask), (bna)->regs.fn_int_mask)
1114
1115#define bna_mbox_intr_disable(bna) \
1116 writel((readl((bna)->regs.fn_int_mask) | \
1117 (__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
1118 (bna)->regs.fn_int_mask)
1119
1120#define bna_mbox_intr_enable(bna) \
1121 writel((readl((bna)->regs.fn_int_mask) & \
1122 ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
1123 (bna)->regs.fn_int_mask)
1124
1125#define bna_intr_status_get(_bna, _status) \
1126{ \
1127 (_status) = readl((_bna)->regs.fn_int_status); \
1128 if ((_status)) { \
1129 writel((_status) & ~(__LPU02HOST_MBOX0_STATUS_BITS |\
1130 __LPU02HOST_MBOX1_STATUS_BITS |\
1131 __LPU12HOST_MBOX0_STATUS_BITS |\
1132 __LPU12HOST_MBOX1_STATUS_BITS), \
1133 (_bna)->regs.fn_int_status);\
1134 } \
1135}
1136
1137#define bna_intr_status_get_no_clr(_bna, _status) \
1138 (_status) = readl((_bna)->regs.fn_int_status)
1139
1140#define bna_intr_mask_get(bna, mask) \
1141 (*mask) = readl((bna)->regs.fn_int_mask)
1142
1143#define bna_intr_ack(bna, intr_bmap) \
1144 writel((intr_bmap), (bna)->regs.fn_int_status)
1145
1146#define bna_ib_intx_disable(bna, ib_id) \
1147 writel(readl((bna)->regs.fn_int_mask) | \
1148 (1 << (ib_id)), \
1149 (bna)->regs.fn_int_mask)
1150
1151#define bna_ib_intx_enable(bna, ib_id) \
1152 writel(readl((bna)->regs.fn_int_mask) & \
1153 ~(1 << (ib_id)), \
1154 (bna)->regs.fn_int_mask)
1155
1156#define bna_mbox_msix_idx_set(_device) \
1157do {\
1158 writel(((_device)->vector & 0x000001FF), \
1159 (_device)->bna->pcidev.pci_bar_kva + \
1160 reg_offset[(_device)->bna->pcidev.pci_func].msix_idx);\
1161} while (0)
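/*
 * A minimal sketch of how the status macros above combine when servicing
 * an INTx interrupt, assuming a struct bna whose registers are mapped;
 * handle_mbox_or_err_sketch() and handle_rxtx_sketch() are hypothetical
 * handlers named only for illustration, and locking is omitted.
 */
static void
bna_intx_dispatch_sketch(struct bna *bna)
{
	u32 intr_status;

	/* Reads the status and clears the mailbox status bits */
	bna_intr_status_get(bna, intr_status);
	if (!intr_status)
		return;

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		handle_mbox_or_err_sketch(bna, intr_status);	/* hypothetical */

	if (BNA_IS_INTX_DATA_INTR(intr_status))
		handle_rxtx_sketch(bna, intr_status);		/* hypothetical */
}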
1162
1163/**
1164 *
1165 * TxQ, RxQ, CQ related bits, offsets, macros
1166 *
1167 */
1168
1169#define BNA_Q_IDLE_STATE 0x00008001
1170
1171#define BNA_GET_DOORBELL_BASE_ADDR(_bar0) \
1172 ((_bar0) + HQM_DOORBELL_BLK_BASE_ADDR)
1173
1174#define BNA_GET_DOORBELL_ENTRY_OFFSET(_entry) \
1175 ((HQM_DOORBELL_BLK_BASE_ADDR) \
1176 + (_entry << 7))
1177
1178#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
1179 (0x80000000 | ((_timeout) << 16) | (_events))
1180
1181#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
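/*
 * A minimal sketch of acknowledging an IB through its doorbell, assuming
 * the caller already knows the IB's doorbell address (computed from
 * BNA_GET_DOORBELL_BASE_ADDR() and BNA_GET_DOORBELL_ENTRY_OFFSET()) and
 * its configured coalescing timeout; the function name is illustrative.
 * The write re-arms the interrupt with the given coalescing timeout and
 * acknowledges 'events' completions.
 */
static inline void
ib_doorbell_ack_sketch(void __iomem *doorbell_addr, u32 timeout, u32 events)
{
	writel(BNA_DOORBELL_IB_INT_ACK(timeout, events), doorbell_addr);
}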
1182
1183/* TxQ Entry Opcodes */
1184#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
1185#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
1186#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
1187
1188/* TxQ Entry Control Flags */
1189#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
1190#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
1191#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
1192#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
1193#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
1194#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
1195#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
1196
1197#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
1198 (((_hdr_size) << 10) | ((_offset) & 0x3FF))
1199
1200/*
1201 * Completion Q defines
1202 */
1203/* CQ Entry Flags */
1204#define BNA_CQ_EF_MAC_ERROR (1 << 0)
1205#define BNA_CQ_EF_FCS_ERROR (1 << 1)
1206#define BNA_CQ_EF_TOO_LONG (1 << 2)
1207#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
1208
1209#define BNA_CQ_EF_RSVD1 (1 << 4)
1210#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
1211#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
1212#define BNA_CQ_EF_HDS_HEADER (1 << 7)
1213
1214#define BNA_CQ_EF_UDP (1 << 8)
1215#define BNA_CQ_EF_TCP (1 << 9)
1216#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
1217#define BNA_CQ_EF_IPV6 (1 << 11)
1218
1219#define BNA_CQ_EF_IPV4 (1 << 12)
1220#define BNA_CQ_EF_VLAN (1 << 13)
1221#define BNA_CQ_EF_RSS (1 << 14)
1222#define BNA_CQ_EF_RSVD2 (1 << 15)
1223
1224#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
1225#define BNA_CQ_EF_MCAST (1 << 17)
1226#define BNA_CQ_EF_BCAST (1 << 18)
1227#define BNA_CQ_EF_REMOTE (1 << 19)
1228
1229#define BNA_CQ_EF_LOCAL (1 << 20)
1230
1231/**
1232 *
1233 * Data structures
1234 *
1235 */
1236
1237enum txf_flags {
1238 BFI_TXF_CF_ENABLE = 1 << 0,
1239 BFI_TXF_CF_VLAN_FILTER = 1 << 8,
1240 BFI_TXF_CF_VLAN_ADMIT = 1 << 9,
1241 BFI_TXF_CF_VLAN_INSERT = 1 << 10,
1242 BFI_TXF_CF_RSVD1 = 1 << 11,
1243 BFI_TXF_CF_MAC_SA_CHECK = 1 << 12,
1244 BFI_TXF_CF_VLAN_WI_BASED = 1 << 13,
1245 BFI_TXF_CF_VSWITCH_MCAST = 1 << 14,
1246 BFI_TXF_CF_VSWITCH_UCAST = 1 << 15,
1247 BFI_TXF_CF_RSVD2 = 0x7F << 1
1248};
1249
1250enum ib_flags {
1251 BFI_IB_CF_MASTER_ENABLE = (1 << 0),
1252 BFI_IB_CF_MSIX_MODE = (1 << 1),
1253 BFI_IB_CF_COALESCING_MODE = (1 << 2),
1254 BFI_IB_CF_INTER_PKT_ENABLE = (1 << 3),
1255 BFI_IB_CF_INT_ENABLE = (1 << 4),
1256 BFI_IB_CF_INTER_PKT_DMA = (1 << 5),
1257 BFI_IB_CF_ACK_PENDING = (1 << 6),
1258 BFI_IB_CF_RESERVED1 = (1 << 7)
1259};
1260
1261enum rss_hash_type {
1262 BFI_RSS_T_V4_TCP = (1 << 11),
1263 BFI_RSS_T_V4_IP = (1 << 10),
1264 BFI_RSS_T_V6_TCP = (1 << 9),
1265 BFI_RSS_T_V6_IP = (1 << 8)
1266};
1267enum hds_header_type {
1268 BNA_HDS_T_V4_TCP = (1 << 11),
1269 BNA_HDS_T_V4_UDP = (1 << 10),
1270 BNA_HDS_T_V6_TCP = (1 << 9),
1271 BNA_HDS_T_V6_UDP = (1 << 8),
1272 BNA_HDS_FORCED = (1 << 7),
1273};
1274enum rxf_flags {
1275 BNA_RXF_CF_SM_LG_RXQ = (1 << 15),
1276 BNA_RXF_CF_DEFAULT_VLAN = (1 << 14),
1277 BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE = (1 << 13),
1278 BNA_RXF_CF_VLAN_STRIP = (1 << 12),
1279 BNA_RXF_CF_RSS_ENABLE = (1 << 8)
1280};
1281struct bna_chip_regs_offset {
1282 u32 page_addr;
1283 u32 fn_int_status;
1284 u32 fn_int_mask;
1285 u32 msix_idx;
1286};
1287
1288struct bna_chip_regs {
1289 void __iomem *page_addr;
1290 void __iomem *fn_int_status;
1291 void __iomem *fn_int_mask;
1292};
1293
1294struct bna_txq_mem {
1295 u32 pg_tbl_addr_lo;
1296 u32 pg_tbl_addr_hi;
1297 u32 cur_q_entry_lo;
1298 u32 cur_q_entry_hi;
1299 u32 reserved1;
1300 u32 reserved2;
1301 u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
1302 /* 15:0 ->producer pointer (index?) */
1303 u32 entry_n_pg_size; /* 31:16->entry size */
1304 /* 15:0 ->page size */
1305 u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
1306 /* 23:16->Int Blk Offset */
1307 /* 15:0 ->consumer pointer(index?) */
1308 u32 cns_ptr2_n_q_state; /* 31:16->cons. ptr 2; 15:0-> Q state */
1309 u32 nxt_qid_n_fid_n_pri; /* 17:10->next */
1310 /* QId;9:3->FID;2:0->Priority */
1311 u32 wvc_n_cquota_n_rquota; /* 31:24->WI Vector Count; */
1312 /* 23:12->Cfg Quota; */
1313 /* 11:0 ->Run Quota */
1314 u32 reserved3[4];
1315};
1316
1317struct bna_rxq_mem {
1318 u32 pg_tbl_addr_lo;
1319 u32 pg_tbl_addr_hi;
1320 u32 cur_q_entry_lo;
1321 u32 cur_q_entry_hi;
1322 u32 reserved1;
1323 u32 reserved2;
1324 u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
1325 /* 15:0 ->producer pointer (index?) */
1326 u32 entry_n_pg_size; /* 31:16->entry size */
1327 /* 15:0 ->page size */
1328 u32 sg_n_cq_n_cns_ptr; /* 31:28->reserved; 27:24->sg count */
1329 /* 23:16->CQ; */
1330 /* 15:0->consumer pointer(index?) */
1331 u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
1332 u32 next_qid; /* 17:10->next QId */
1333 u32 reserved3;
1334 u32 reserved4[4];
1335};
1336
1337struct bna_rxtx_q_mem {
1338 struct bna_rxq_mem rxq;
1339 struct bna_txq_mem txq;
1340};
1341
1342struct bna_cq_mem {
1343 u32 pg_tbl_addr_lo;
1344 u32 pg_tbl_addr_hi;
1345 u32 cur_q_entry_lo;
1346 u32 cur_q_entry_hi;
1347
1348 u32 reserved1;
1349 u32 reserved2;
1350 u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
1351 /* 15:0 ->producer pointer (index?) */
1352 u32 entry_n_pg_size; /* 31:16->entry size */
1353 /* 15:0 ->page size */
1354 u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
1355 /* 23:16->Int Blk Offset */
1356 /* 15:0 ->consumer pointer(index?) */
1357 u32 q_state; /* 31:16->reserved; 15:0-> Q state */
1358 u32 reserved3[2];
1359 u32 reserved4[4];
1360};
1361
1362struct bna_ib_blk_mem {
1363 u32 host_addr_lo;
1364 u32 host_addr_hi;
1365 u32 clsc_n_ctrl_n_msix; /* 31:24->coalescing; */
1366 /* 23:16->coalescing cfg; */
1367 /* 15:8 ->control; */
1368 /* 7:0 ->msix; */
1369 u32 ipkt_n_ent_n_idxof;
1370 u32 ipkt_cnt_cfg_n_unacked;
1371
1372 u32 reserved[3];
1373};
1374
1375struct bna_idx_tbl_mem {
1376 u32 idx; /* !< 31:16->res;15:0->idx; */
1377};
1378
1379struct bna_doorbell_qset {
1380 u32 rxq[0x20 >> 2];
1381 u32 txq[0x20 >> 2];
1382 u32 ib0[0x20 >> 2];
1383 u32 ib1[0x20 >> 2];
1384};
1385
1386struct bna_rx_fndb_ram {
1387 u32 rss_prop;
1388 u32 size_routing_props;
1389 u32 rit_hds_mcastq;
1390 u32 control_flags;
1391};
1392
1393struct bna_tx_fndb_ram {
1394 u32 vlan_n_ctrl_flags;
1395};
1396
1397/**
1398 * @brief
1399 * Structure which maps to RxFn Indirection Table (RIT)
1400 * Size : 1 word
1401 * See catapult_spec.pdf, RxA for details
1402 */
1403struct bna_rit_mem {
1404 u32 rxq_ids; /* !< 31:12->res;11:0->two 6 bit RxQ Ids */
1405};
1406
1407/**
1408 * @brief
1409 * Structure which maps to RSS Table entry
1410 * Size : 16 words
1411 * See catapult_spec.pdf, RAD for details
1412 */
1413struct bna_rss_mem {
1414 /*
1415 * 31:12-> res
1416 * 11:8 -> protocol type
1417 * 7:0 -> hash index
1418 */
1419 u32 type_n_hash;
1420 u32 hash_key[10]; /* !< 40 byte Toeplitz hash key */
1421 u32 reserved[5];
1422};
1423
1424/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
1425struct bna_dma_addr {
1426 u32 msb;
1427 u32 lsb;
1428};
1429
1430struct bna_txq_wi_vector {
1431 u16 reserved;
1432 u16 length; /* Only 14 LSB are valid */
1433 struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
1434};
1435
1436typedef u16 bna_txq_wi_opcode_t;
1437
1438typedef u16 bna_txq_wi_ctrl_flag_t;
1439
1440/**
1441 * TxQ Entry Structure
1442 *
1443 * BEWARE: Load values into this structure with correct endianness.
1444 */
1445struct bna_txq_entry {
1446 union {
1447 struct {
1448 u8 reserved;
1449 u8 num_vectors; /* number of vectors present */
1450 bna_txq_wi_opcode_t opcode; /* Either */
1451 /* BNA_TXQ_WI_SEND or */
1452 /* BNA_TXQ_WI_SEND_LSO */
1453 bna_txq_wi_ctrl_flag_t flags; /* OR of all the flags */
1454 u16 l4_hdr_size_n_offset;
1455 u16 vlan_tag;
1456 u16 lso_mss; /* Only 14 LSB are valid */
1457 u32 frame_length; /* Only 24 LSB are valid */
1458 } wi;
1459
1460 struct {
1461 u16 reserved;
1462 bna_txq_wi_opcode_t opcode; /* Must be */
1463 /* BNA_TXQ_WI_EXTENSION */
1464 u32 reserved2[3]; /* Place holder for */
1465 /* removed vector (12 bytes) */
1466 } wi_ext;
1467 } hdr;
1468 struct bna_txq_wi_vector vector[4];
1469};
1470#define wi_hdr hdr.wi
1471#define wi_ext_hdr hdr.wi_ext
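/*
 * A minimal sketch of filling a single-frame (non-LSO) TxQ entry with the
 * opcode and flag definitions earlier in this file, assuming a one-fragment
 * buffer whose DMA address and length are already known, and assuming the
 * hardware expects big-endian fields (hence htons()/htonl()), per the
 * BEWARE note above. Variable and function names are illustrative;
 * checksum offload would additionally set the BNA_TXQ_WI_CF_*_CKSUM flags
 * and BNA_TXQ_WI_L4_HDR_N_OFFSET().
 */
static void
bna_txq_entry_fill_sketch(struct bna_txq_entry *txqent,
			  u64 dma_addr, u16 len, u16 vlan_tag)
{
	txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
	txqent->hdr.wi.flags = htons(BNA_TXQ_WI_CF_INS_VLAN);
	txqent->hdr.wi.num_vectors = 1;
	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
	txqent->hdr.wi.lso_mss = 0;
	txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	txqent->hdr.wi.frame_length = htonl(len);

	/* Only the 14 LSB of the vector length are valid */
	txqent->vector[0].length = htons(len);
	txqent->vector[0].host_addr.msb = htonl(upper_32_bits(dma_addr));
	txqent->vector[0].host_addr.lsb = htonl(lower_32_bits(dma_addr));
}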
1472
1473/* RxQ Entry Structure */
1474struct bna_rxq_entry { /* Rx-Buffer */
1475 struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
1476};
1477
1478typedef u32 bna_cq_e_flag_t;
1479
1480/* CQ Entry Structure */
1481struct bna_cq_entry {
1482 bna_cq_e_flag_t flags;
1483 u16 vlan_tag;
1484 u16 length;
1485 u32 rss_hash;
1486 u8 valid;
1487 u8 reserved1;
1488 u8 reserved2;
1489 u8 rxq_id;
1490};
1491
1492#endif /* __BNA_HW_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_txrx.c b/drivers/net/ethernet/brocade/bna/bna_txrx.c
new file mode 100644
index 000000000000..f0983c832447
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_txrx.c
@@ -0,0 +1,4185 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include "bna.h"
19#include "bfa_cs.h"
20#include "bfi.h"
21
22/**
23 * IB
24 */
25#define bna_ib_find_free_ibidx(_mask, _pos)\
26do {\
27 (_pos) = 0;\
28 while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
29 ((1 << (_pos)) & (_mask)))\
30 (_pos)++;\
31} while (0)
32
33#define bna_ib_count_ibidx(_mask, _count)\
34do {\
35 int pos = 0;\
36 (_count) = 0;\
37 while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
38 if ((1 << pos) & (_mask))\
39 (_count) = pos + 1;\
40 pos++;\
41 } \
42} while (0)
43
44#define bna_ib_select_segpool(_count, _q_idx)\
45do {\
46 int i;\
47 (_q_idx) = -1;\
48 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
49 if ((_count <= ibidx_pool[i].pool_entry_size)) {\
50 (_q_idx) = i;\
51 break;\
52 } \
53 } \
54} while (0)
55
56struct bna_ibidx_pool {
57 int pool_size;
58 int pool_entry_size;
59};
60init_ibidx_pool(ibidx_pool);
61
62static struct bna_intr *
63bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
64 int vector)
65{
66 struct bna_intr *intr;
67 struct list_head *qe;
68
69 list_for_each(qe, &ib_mod->intr_active_q) {
70 intr = (struct bna_intr *)qe;
71
72 if ((intr->intr_type == intr_type) &&
73 (intr->vector == vector)) {
74 intr->ref_count++;
75 return intr;
76 }
77 }
78
79 if (list_empty(&ib_mod->intr_free_q))
80 return NULL;
81
82 bfa_q_deq(&ib_mod->intr_free_q, &intr);
83 bfa_q_qe_init(&intr->qe);
84
85 intr->ref_count = 1;
86 intr->intr_type = intr_type;
87 intr->vector = vector;
88
89 list_add_tail(&intr->qe, &ib_mod->intr_active_q);
90
91 return intr;
92}
93
94static void
95bna_intr_put(struct bna_ib_mod *ib_mod,
96 struct bna_intr *intr)
97{
98 intr->ref_count--;
99
100 if (intr->ref_count == 0) {
101 intr->ib = NULL;
102 list_del(&intr->qe);
103 bfa_q_qe_init(&intr->qe);
104 list_add_tail(&intr->qe, &ib_mod->intr_free_q);
105 }
106}
107
108void
109bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
110 struct bna_res_info *res_info)
111{
112 int i;
113 int j;
114 int count;
115 u8 offset;
116 struct bna_doorbell_qset *qset;
117 unsigned long off;
118
119 ib_mod->bna = bna;
120
121 ib_mod->ib = (struct bna_ib *)
122 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
123 ib_mod->intr = (struct bna_intr *)
124 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
125 ib_mod->idx_seg = (struct bna_ibidx_seg *)
126 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
127
128 INIT_LIST_HEAD(&ib_mod->ib_free_q);
129 INIT_LIST_HEAD(&ib_mod->intr_free_q);
130 INIT_LIST_HEAD(&ib_mod->intr_active_q);
131
132 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
133 INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
134
135 for (i = 0; i < BFI_MAX_IB; i++) {
136 ib_mod->ib[i].ib_id = i;
137
138 ib_mod->ib[i].ib_seg_host_addr_kva =
139 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
140 ib_mod->ib[i].ib_seg_host_addr.lsb =
141 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
142 ib_mod->ib[i].ib_seg_host_addr.msb =
143 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
144
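		/*
		 * Compute the byte offset of this IB's doorbell within the
		 * doorbell block: queue-set (i >> 1), IB slot (i & 1), using
		 * a NULL-based pointer as a poor man's offsetof().
		 */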
145 qset = (struct bna_doorbell_qset *)0;
146 off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
147 * (0x20 >> 2)]);
148 ib_mod->ib[i].door_bell.doorbell_addr = off +
149 BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
150
151 bfa_q_qe_init(&ib_mod->ib[i].qe);
152 list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
153
154 bfa_q_qe_init(&ib_mod->intr[i].qe);
155 list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
156 }
157
158 count = 0;
159 offset = 0;
160 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
161 for (j = 0; j < ibidx_pool[i].pool_size; j++) {
162 bfa_q_qe_init(&ib_mod->idx_seg[count]);
163 ib_mod->idx_seg[count].ib_seg_size =
164 ibidx_pool[i].pool_entry_size;
165 ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
166 list_add_tail(&ib_mod->idx_seg[count].qe,
167 &ib_mod->ibidx_seg_pool[i]);
168 count++;
169 offset += ibidx_pool[i].pool_entry_size;
170 }
171 }
172}
173
174void
175bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
176{
177 int i;
178 int j;
179 struct list_head *qe;
180
181 i = 0;
182 list_for_each(qe, &ib_mod->ib_free_q)
183 i++;
184
185 i = 0;
186 list_for_each(qe, &ib_mod->intr_free_q)
187 i++;
188
189 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
190 j = 0;
191 list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
192 j++;
193 }
194
195 ib_mod->bna = NULL;
196}
197
198static struct bna_ib *
199bna_ib_get(struct bna_ib_mod *ib_mod,
200 enum bna_intr_type intr_type,
201 int vector)
202{
203 struct bna_ib *ib;
204 struct bna_intr *intr;
205
206 if (intr_type == BNA_INTR_T_INTX)
207 vector = (1 << vector);
208
209 intr = bna_intr_get(ib_mod, intr_type, vector);
210 if (intr == NULL)
211 return NULL;
212
213 if (intr->ib) {
214 if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
215 bna_intr_put(ib_mod, intr);
216 return NULL;
217 }
218 intr->ib->ref_count++;
219 return intr->ib;
220 }
221
222 if (list_empty(&ib_mod->ib_free_q)) {
223 bna_intr_put(ib_mod, intr);
224 return NULL;
225 }
226
227 bfa_q_deq(&ib_mod->ib_free_q, &ib);
228 bfa_q_qe_init(&ib->qe);
229
230 ib->ref_count = 1;
231 ib->start_count = 0;
232 ib->idx_mask = 0;
233
234 ib->intr = intr;
235 ib->idx_seg = NULL;
236 intr->ib = ib;
237
238 ib->bna = ib_mod->bna;
239
240 return ib;
241}
242
243static void
244bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
245{
246 bna_intr_put(ib_mod, ib->intr);
247
248 ib->ref_count--;
249
250 if (ib->ref_count == 0) {
251 ib->intr = NULL;
252 ib->bna = NULL;
253 list_add_tail(&ib->qe, &ib_mod->ib_free_q);
254 }
255}
256
257/* Returns index offset - starting from 0 */
258static int
259bna_ib_reserve_idx(struct bna_ib *ib)
260{
261 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
262 struct bna_ibidx_seg *idx_seg;
263 int idx;
264 int num_idx;
265 int q_idx;
266
267 /* Find the first free index position */
268 bna_ib_find_free_ibidx(ib->idx_mask, idx);
269 if (idx == BFI_IBIDX_MAX_SEGSIZE)
270 return -1;
271
272 /*
273 * Calculate the total number of indexes held by this IB,
274 * including the index newly reserved above.
275 */
276 bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
277
278	/* See if there is free space in the index segment held by this IB */
279 if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
280 ib->idx_mask |= (1 << idx);
281 return idx;
282 }
283
284 if (ib->start_count)
285 return -1;
286
287 /* Allocate a new segment */
288 bna_ib_select_segpool(num_idx, q_idx);
289 while (1) {
290 if (q_idx == BFI_IBIDX_TOTAL_POOLS)
291 return -1;
292 if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
293 break;
294 q_idx++;
295 }
296 bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
297 bfa_q_qe_init(&idx_seg->qe);
298
299 /* Free the old segment */
300 if (ib->idx_seg) {
301 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
302 list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
303 }
304
305 ib->idx_seg = idx_seg;
306
307 ib->idx_mask |= (1 << idx);
308
309 return idx;
310}
311
312static void
313bna_ib_release_idx(struct bna_ib *ib, int idx)
314{
315 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
316 struct bna_ibidx_seg *idx_seg;
317 int num_idx;
318 int cur_q_idx;
319 int new_q_idx;
320
321 ib->idx_mask &= ~(1 << idx);
322
323 if (ib->start_count)
324 return;
325
326 bna_ib_count_ibidx(ib->idx_mask, num_idx);
327
328 /*
329 * Free the segment, if there are no more indexes in the segment
330 * held by this IB
331 */
332 if (!num_idx) {
333 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
334 list_add_tail(&ib->idx_seg->qe,
335 &ib_mod->ibidx_seg_pool[cur_q_idx]);
336 ib->idx_seg = NULL;
337 return;
338 }
339
340 /* See if we can move to a smaller segment */
341 bna_ib_select_segpool(num_idx, new_q_idx);
342 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
343 while (new_q_idx < cur_q_idx) {
344 if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
345 break;
346 new_q_idx++;
347 }
348 if (new_q_idx < cur_q_idx) {
349 /* Select the new smaller segment */
350 bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
351 bfa_q_qe_init(&idx_seg->qe);
352 /* Free the old segment */
353 list_add_tail(&ib->idx_seg->qe,
354 &ib_mod->ibidx_seg_pool[cur_q_idx]);
355 ib->idx_seg = idx_seg;
356 }
357}
358
359static int
360bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
361{
362 if (ib->start_count)
363 return -1;
364
365 ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
366 ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
367 ib->ib_config.interpkt_count = ib_config->interpkt_count;
368 ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
369
370 ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
371 if (ib->intr->intr_type == BNA_INTR_T_MSIX)
372 ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
373
374 return 0;
375}
376
377static void
378bna_ib_start(struct bna_ib *ib)
379{
380 struct bna_ib_blk_mem ib_cfg;
381 struct bna_ib_blk_mem *ib_mem;
382 u32 pg_num;
383 u32 intx_mask;
384 int i;
385 void __iomem *base_addr;
386 unsigned long off;
387
388 ib->start_count++;
389
390 if (ib->start_count > 1)
391 return;
392
393 ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
394 ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
395
396 ib_cfg.clsc_n_ctrl_n_msix = (((u32)
397 ib->ib_config.coalescing_timeo << 16) |
398 ((u32)ib->ib_config.ctrl_flags << 8) |
399 (ib->intr->vector));
400 ib_cfg.ipkt_n_ent_n_idxof =
401 ((u32)
402 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
403 ((u32)ib->idx_seg->ib_seg_size << 8) |
404 (ib->idx_seg->ib_idx_tbl_offset);
405 ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
406 ib->ib_config.interpkt_count << 24);
407
408 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
409 HQM_IB_RAM_BASE_OFFSET);
410 writel(pg_num, ib->bna->regs.page_addr);
411
412 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
413 HQM_IB_RAM_BASE_OFFSET);
414
415 ib_mem = (struct bna_ib_blk_mem *)0;
416 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
417 writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
418
419 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
420 writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
421
422 off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
423 writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
424
425 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
426 writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
427
428 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
429 writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
430
431 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
432 (u32)ib->ib_config.coalescing_timeo, 0);
433
434 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
435 HQM_INDX_TBL_RAM_BASE_OFFSET);
436 writel(pg_num, ib->bna->regs.page_addr);
437
438 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
439 HQM_INDX_TBL_RAM_BASE_OFFSET);
440 for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
441 off = (unsigned long)
442 ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
443 writel(0, base_addr + off);
444 }
445
446 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
447 bna_intx_disable(ib->bna, intx_mask);
448 intx_mask &= ~(ib->intr->vector);
449 bna_intx_enable(ib->bna, intx_mask);
450 }
451}
452
453static void
454bna_ib_stop(struct bna_ib *ib)
455{
456 u32 intx_mask;
457
458 ib->start_count--;
459
460 if (ib->start_count == 0) {
461 writel(BNA_DOORBELL_IB_INT_DISABLE,
462 ib->door_bell.doorbell_addr);
463 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
464 bna_intx_disable(ib->bna, intx_mask);
465 intx_mask |= (ib->intr->vector);
466 bna_intx_enable(ib->bna, intx_mask);
467 }
468 }
469}
470
471static void
472bna_ib_fail(struct bna_ib *ib)
473{
474 ib->start_count = 0;
475}
476
477/**
478 * RXF
479 */
480static void rxf_enable(struct bna_rxf *rxf);
481static void rxf_disable(struct bna_rxf *rxf);
482static void __rxf_config_set(struct bna_rxf *rxf);
483static void __rxf_rit_set(struct bna_rxf *rxf);
484static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
485static int rxf_process_packet_filter(struct bna_rxf *rxf);
486static int rxf_clear_packet_filter(struct bna_rxf *rxf);
487static void rxf_reset_packet_filter(struct bna_rxf *rxf);
488static void rxf_cb_enabled(void *arg, int status);
489static void rxf_cb_disabled(void *arg, int status);
490static void bna_rxf_cb_stats_cleared(void *arg, int status);
491static void __rxf_enable(struct bna_rxf *rxf);
492static void __rxf_disable(struct bna_rxf *rxf);
493
494bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
495 enum bna_rxf_event);
496bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
497 enum bna_rxf_event);
498bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
499 enum bna_rxf_event);
500bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
501 enum bna_rxf_event);
502bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
503 enum bna_rxf_event);
504bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
505 enum bna_rxf_event);
506bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
507 enum bna_rxf_event);
508bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
509 enum bna_rxf_event);
510bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
511 enum bna_rxf_event);
512
513static struct bfa_sm_table rxf_sm_table[] = {
514 {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
515 {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
516 {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
517 {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
518 {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
519 {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
520 {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
521 {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
522 {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
523};
524
525static void
526bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
527{
528 call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
529}
530
531static void
532bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
533{
534 switch (event) {
535 case RXF_E_START:
536 bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
537 break;
538
539 case RXF_E_STOP:
540 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
541 break;
542
543 case RXF_E_FAIL:
544 /* No-op */
545 break;
546
547 case RXF_E_CAM_FLTR_MOD:
548 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
549 break;
550
551 case RXF_E_STARTED:
552 case RXF_E_STOPPED:
553 case RXF_E_CAM_FLTR_RESP:
554 /**
555 * These events are received due to flushing of mbox
556 * when device fails
557 */
558 /* No-op */
559 break;
560
561 case RXF_E_PAUSE:
562 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
563 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
564 break;
565
566 case RXF_E_RESUME:
567 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
568 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
569 break;
570
571 default:
572 bfa_sm_fault(event);
573 }
574}
575
576static void
577bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
578{
579 __rxf_config_set(rxf);
580 __rxf_rit_set(rxf);
581 rxf_enable(rxf);
582}
583
584static void
585bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
586{
587 switch (event) {
588 case RXF_E_STOP:
589 /**
 590		 * STOP originates from bnad. When this happens,
 591		 * it cannot be waiting for a filter update
592 */
593 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
594 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
595 break;
596
597 case RXF_E_FAIL:
598 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
599 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
600 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
601 break;
602
603 case RXF_E_CAM_FLTR_MOD:
604 /* No-op */
605 break;
606
607 case RXF_E_STARTED:
608 /**
 609		 * Force rxf_process_packet_filter() to go through
 610		 * initial config
611 */
612 if ((rxf->ucast_active_mac != NULL) &&
613 (rxf->ucast_pending_set == 0))
614 rxf->ucast_pending_set = 1;
615
616 if (rxf->rss_status == BNA_STATUS_T_ENABLED)
617 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
618
619 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
620
621 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
622 break;
623
624 case RXF_E_PAUSE:
625 case RXF_E_RESUME:
626 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
627 break;
628
629 default:
630 bfa_sm_fault(event);
631 }
632}
633
634static void
635bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
636{
637 if (!rxf_process_packet_filter(rxf)) {
638 /* No more pending CAM entries to update */
639 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
640 }
641}
642
643static void
644bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
645{
646 switch (event) {
647 case RXF_E_STOP:
648 /**
 649		 * STOP originates from bnad. When this happens,
 650		 * it cannot be waiting for a filter update
651 */
652 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
653 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
654 break;
655
656 case RXF_E_FAIL:
657 rxf_reset_packet_filter(rxf);
658 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
659 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
660 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
661 break;
662
663 case RXF_E_CAM_FLTR_MOD:
664 /* No-op */
665 break;
666
667 case RXF_E_CAM_FLTR_RESP:
668 if (!rxf_process_packet_filter(rxf)) {
669 /* No more pending CAM entries to update */
670 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
671 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
672 }
673 break;
674
675 case RXF_E_PAUSE:
676 case RXF_E_RESUME:
677 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
678 break;
679
680 default:
681 bfa_sm_fault(event);
682 }
683}
684
685static void
686bna_rxf_sm_started_entry(struct bna_rxf *rxf)
687{
688 call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
689
690 if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
691 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
692 bfa_fsm_send_event(rxf, RXF_E_PAUSE);
693 else
694 bfa_fsm_send_event(rxf, RXF_E_RESUME);
695 }
696
697}
698
699static void
700bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
701{
702 switch (event) {
703 case RXF_E_STOP:
704 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
 705		/* Hack to get the FSM to start clearing CAM entries */
706 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
707 break;
708
709 case RXF_E_FAIL:
710 rxf_reset_packet_filter(rxf);
711 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
712 break;
713
714 case RXF_E_CAM_FLTR_MOD:
715 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
716 break;
717
718 case RXF_E_PAUSE:
719 bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
720 break;
721
722 case RXF_E_RESUME:
723 bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
724 break;
725
726 default:
727 bfa_sm_fault(event);
728 }
729}
730
731static void
732bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
733{
734 /**
735 * Note: Do not add rxf_clear_packet_filter here.
736 * It will overstep mbox when this transition happens:
737 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
738 */
739}
740
741static void
742bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
743{
744 switch (event) {
745 case RXF_E_FAIL:
746 /**
747 * FSM was in the process of stopping, initiated by
748 * bnad. When this happens, no one can be waiting for
749 * start or filter update
750 */
751 rxf_reset_packet_filter(rxf);
752 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
753 break;
754
755 case RXF_E_CAM_FLTR_RESP:
756 if (!rxf_clear_packet_filter(rxf)) {
757 /* No more pending CAM entries to clear */
758 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
759 rxf_disable(rxf);
760 }
761 break;
762
763 default:
764 bfa_sm_fault(event);
765 }
766}
767
768static void
769bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
770{
771 /**
772 * NOTE: Do not add rxf_disable here.
773 * It will overstep mbox when this transition happens:
774 * start_wait -> stop_wait on RXF_E_STOP event
775 */
776}
777
778static void
779bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
780{
781 switch (event) {
782 case RXF_E_FAIL:
783 /**
784 * FSM was in the process of stopping, initiated by
785 * bnad. When this happens, no one can be waiting for
786 * start or filter update
787 */
788 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
789 break;
790
791 case RXF_E_STARTED:
792 /**
793 * This event is received due to abrupt transition from
794 * bna_rxf_sm_start_wait state on receiving
795 * RXF_E_STOP event
796 */
797 rxf_disable(rxf);
798 break;
799
800 case RXF_E_STOPPED:
801 /**
802 * FSM was in the process of stopping, initiated by
803 * bnad. When this happens, no one can be waiting for
804 * start or filter update
805 */
806 bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
807 break;
808
809 case RXF_E_PAUSE:
810 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
811 break;
812
813 case RXF_E_RESUME:
814 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
815 break;
816
817 default:
818 bfa_sm_fault(event);
819 }
820}
821
822static void
823bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
824{
825 rxf->rxf_flags &=
826 ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
827 __rxf_disable(rxf);
828}
829
830static void
831bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
832{
833 switch (event) {
834 case RXF_E_FAIL:
835 /**
836 * FSM was in the process of disabling rxf, initiated by
837 * bnad.
838 */
839 call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
840 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
841 break;
842
843 case RXF_E_STOPPED:
844 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
845 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
846 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
847 break;
848
849 /*
850 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
851 * any other event during these states
852 */
853 default:
854 bfa_sm_fault(event);
855 }
856}
857
858static void
859bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
860{
861 rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
862 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
863 __rxf_enable(rxf);
864}
865
866static void
867bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
868{
869 switch (event) {
870 case RXF_E_FAIL:
871 /**
872 * FSM was in the process of disabling rxf, initiated by
873 * bnad.
874 */
875 call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
876 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
877 break;
878
879 case RXF_E_STARTED:
880 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
881 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
882 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
883 break;
884
885 /*
886 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
887 * any other event during these states
888 */
889 default:
890 bfa_sm_fault(event);
891 }
892}
893
894static void
895bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
896{
897 __bna_rxf_stat_clr(rxf);
898}
899
900static void
901bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
902{
903 switch (event) {
904 case RXF_E_FAIL:
905 case RXF_E_STAT_CLEARED:
906 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
907 break;
908
909 default:
910 bfa_sm_fault(event);
911 }
912}
913
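/**
 * Raw h/w enable: sends a BFI_LL_H2I_RX_REQ mailbox command with this
 * RxF's bit set in the rxf_id mask and enable = 1; rxf_cb_enabled() runs
 * on completion.
 */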
914static void
915__rxf_enable(struct bna_rxf *rxf)
916{
917 struct bfi_ll_rxf_multi_req ll_req;
918 u32 bm[2] = {0, 0};
919
920 if (rxf->rxf_id < 32)
921 bm[0] = 1 << rxf->rxf_id;
922 else
923 bm[1] = 1 << (rxf->rxf_id - 32);
924
925 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
926 ll_req.rxf_id_mask[0] = htonl(bm[0]);
927 ll_req.rxf_id_mask[1] = htonl(bm[1]);
928 ll_req.enable = 1;
929
930 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
931 rxf_cb_enabled, rxf);
932
933 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
934}
935
936static void
937__rxf_disable(struct bna_rxf *rxf)
938{
939 struct bfi_ll_rxf_multi_req ll_req;
940 u32 bm[2] = {0, 0};
941
942 if (rxf->rxf_id < 32)
943 bm[0] = 1 << rxf->rxf_id;
944 else
945 bm[1] = 1 << (rxf->rxf_id - 32);
946
947 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
948 ll_req.rxf_id_mask[0] = htonl(bm[0]);
949 ll_req.rxf_id_mask[1] = htonl(bm[1]);
950 ll_req.enable = 0;
951
952 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
953 rxf_cb_disabled, rxf);
954
955 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
956}
957
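/**
 * Program the per-function Rx configuration: the RSS table (when RSS is
 * enabled) and the Rx function database RAM entry - RSS properties,
 * small/large buffer routing, RIT offset, HDS settings and default VLAN.
 */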
958static void
959__rxf_config_set(struct bna_rxf *rxf)
960{
961 u32 i;
962 struct bna_rss_mem *rss_mem;
963 struct bna_rx_fndb_ram *rx_fndb_ram;
964 struct bna *bna = rxf->rx->bna;
965 void __iomem *base_addr;
966 unsigned long off;
967
968 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
969 RSS_TABLE_BASE_OFFSET);
970
971 rss_mem = (struct bna_rss_mem *)0;
972
973 /* Configure RSS if required */
974 if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
975 /* configure RSS Table */
976 writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
977 bna->port_num, RSS_TABLE_BASE_OFFSET),
978 bna->regs.page_addr);
979
980 /* temporarily disable RSS, while hash value is written */
981 off = (unsigned long)&rss_mem[0].type_n_hash;
982 writel(0, base_addr + off);
983
984 for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
985 off = (unsigned long)
986 &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
987 writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
988 base_addr + off);
989 }
990
991 off = (unsigned long)&rss_mem[0].type_n_hash;
992 writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
993 base_addr + off);
994 }
995
996 /* Configure RxF */
997 writel(BNA_GET_PAGE_NUM(
998 LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
999 RX_FNDB_RAM_BASE_OFFSET),
1000 bna->regs.page_addr);
1001
1002 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1003 RX_FNDB_RAM_BASE_OFFSET);
1004
1005 rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
1006
1007 /* We always use RSS table 0 */
1008 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
1009 writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
1010 base_addr + off);
1011
 1012	/* small/large buffer enable/disable */
1013 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
1014 writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
1015 base_addr + off);
1016
1017 /* RIT offset, HDS forced offset, multicast RxQ Id */
1018 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
1019 writel((rxf->rit_segment->rit_offset << 16) |
1020 (rxf->forced_offset << 8) |
1021 (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
1022 base_addr + off);
1023
1024 /*
1025 * default vlan tag, default function enable, strip vlan bytes,
1026 * HDS type, header size
1027 */
1028
1029 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
1030 writel(((u32)rxf->default_vlan_tag << 16) |
1031 (rxf->ctrl_flags &
1032 (BNA_RXF_CF_DEFAULT_VLAN |
1033 BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
1034 BNA_RXF_CF_VLAN_STRIP)) |
1035 (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
1036 rxf->hds_cfg.header_size,
1037 base_addr + off);
1038}
1039
1040void
1041__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
1042{
1043 struct bna *bna = rxf->rx->bna;
1044 int i;
1045
1046 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
1047 (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
1048 bna->regs.page_addr);
1049
1050 if (status == BNA_STATUS_T_ENABLED) {
1051 /* enable VLAN filtering on this function */
1052 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1053 writel(rxf->vlan_filter_table[i],
1054 BNA_GET_VLAN_MEM_ENTRY_ADDR
1055 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1056 i * 32));
1057 }
1058 } else {
1059 /* disable VLAN filtering on this function */
1060 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1061 writel(0xffffffff,
1062 BNA_GET_VLAN_MEM_ENTRY_ADDR
1063 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1064 i * 32));
1065 }
1066 }
1067}
1068
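/**
 * Write this RxF's RIT segment (large/small RxQ id pairs) into the
 * function-to-RxQ translation RAM.
 */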
1069static void
1070__rxf_rit_set(struct bna_rxf *rxf)
1071{
1072 struct bna *bna = rxf->rx->bna;
1073 struct bna_rit_mem *rit_mem;
1074 int i;
1075 void __iomem *base_addr;
1076 unsigned long off;
1077
1078 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1079 FUNCTION_TO_RXQ_TRANSLATE);
1080
1081 rit_mem = (struct bna_rit_mem *)0;
1082
1083 writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
1084 FUNCTION_TO_RXQ_TRANSLATE),
1085 bna->regs.page_addr);
1086
1087 for (i = 0; i < rxf->rit_segment->rit_size; i++) {
1088 off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
1089 writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
1090 rxf->rit_segment->rit[i].small_rxq_id,
1091 base_addr + off);
1092 }
1093}
1094
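/**
 * Issue a BFI_LL_H2I_STATS_CLEAR_REQ mailbox command that clears the
 * statistics of this RxF only.
 */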
1095static void
1096__bna_rxf_stat_clr(struct bna_rxf *rxf)
1097{
1098 struct bfi_ll_stats_req ll_req;
1099 u32 bm[2] = {0, 0};
1100
1101 if (rxf->rxf_id < 32)
1102 bm[0] = 1 << rxf->rxf_id;
1103 else
1104 bm[1] = 1 << (rxf->rxf_id - 32);
1105
1106 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
1107 ll_req.stats_mask = 0;
1108 ll_req.txf_id_mask[0] = 0;
1109 ll_req.txf_id_mask[1] = 0;
1110
1111 ll_req.rxf_id_mask[0] = htonl(bm[0]);
1112 ll_req.rxf_id_mask[1] = htonl(bm[1]);
1113
1114 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
1115 bna_rxf_cb_stats_cleared, rxf);
1116 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1117}
1118
1119static void
1120rxf_enable(struct bna_rxf *rxf)
1121{
1122 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1123 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1124 else {
1125 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
1126 __rxf_enable(rxf);
1127 }
1128}
1129
1130static void
1131rxf_cb_enabled(void *arg, int status)
1132{
1133 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1134
1135 bfa_q_qe_init(&rxf->mbox_qe.qe);
1136 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1137}
1138
1139static void
1140rxf_disable(struct bna_rxf *rxf)
1141{
1142 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1143 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
 1144	else {
 1145		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
 1146		__rxf_disable(rxf);
 1147	}
 1148}
1148
1149static void
1150rxf_cb_disabled(void *arg, int status)
1151{
1152 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1153
1154 bfa_q_qe_init(&rxf->mbox_qe.qe);
1155 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1156}
1157
1158void
1159rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
1160{
1161 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1162
1163 bfa_q_qe_init(&rxf->mbox_qe.qe);
1164
1165 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
1166}
1167
1168static void
1169bna_rxf_cb_stats_cleared(void *arg, int status)
1170{
1171 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1172
1173 bfa_q_qe_init(&rxf->mbox_qe.qe);
1174 bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
1175}
1176
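/**
 * Build a CAM (MAC address) mailbox request for this RxF and send it;
 * rxf_cb_cam_fltr_mbox_cmd() is invoked on completion.
 */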
1177void
1178rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
1179 const struct bna_mac *mac_addr)
1180{
1181 struct bfi_ll_mac_addr_req req;
1182
1183 bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
1184
1185 req.rxf_id = rxf->rxf_id;
1186 memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
1187
1188 bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
1189 rxf_cb_cam_fltr_mbox_cmd, rxf);
1190
1191 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1192}
1193
1194static int
1195rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
1196{
1197 struct bna_mac *mac = NULL;
1198 struct list_head *qe;
1199
1200 /* Add multicast entries */
1201 if (!list_empty(&rxf->mcast_pending_add_q)) {
1202 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1203 bfa_q_qe_init(qe);
1204 mac = (struct bna_mac *)qe;
1205 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
1206 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1207 return 1;
1208 }
1209
 1210	/* Delete multicast entries previously added */
1211 if (!list_empty(&rxf->mcast_pending_del_q)) {
1212 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1213 bfa_q_qe_init(qe);
1214 mac = (struct bna_mac *)qe;
1215 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1216 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1217 return 1;
1218 }
1219
1220 return 0;
1221}
1222
1223static int
1224rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
1225{
1226 /* Apply the VLAN filter */
1227 if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
1228 rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
1229 if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
1230 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
1231 }
1232
1233 /* Apply RSS configuration */
1234 if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
1235 rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
1236 if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
1237 /* RSS is being disabled */
1238 rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
1239 __rxf_rit_set(rxf);
1240 __rxf_config_set(rxf);
1241 } else {
1242 /* RSS is being enabled or reconfigured */
1243 rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
1244 __rxf_rit_set(rxf);
1245 __rxf_config_set(rxf);
1246 }
1247 }
1248
1249 return 0;
1250}
1251
1252/**
 1253 * Processes pending ucast/mcast entry addition/deletion and issues the
 1254 * mailbox command. Also processes pending filter configuration - promiscuous
 1255 * mode, default mode, allmulti mode - and issues a mailbox command or
 1256 * directly applies it to h/w
1257 */
1258static int
1259rxf_process_packet_filter(struct bna_rxf *rxf)
1260{
1261 /* Set the default MAC first */
1262 if (rxf->ucast_pending_set > 0) {
1263 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
1264 rxf->ucast_active_mac);
1265 rxf->ucast_pending_set--;
1266 return 1;
1267 }
1268
1269 if (rxf_process_packet_filter_ucast(rxf))
1270 return 1;
1271
1272 if (rxf_process_packet_filter_mcast(rxf))
1273 return 1;
1274
1275 if (rxf_process_packet_filter_promisc(rxf))
1276 return 1;
1277
1278 if (rxf_process_packet_filter_allmulti(rxf))
1279 return 1;
1280
1281 if (rxf_process_packet_filter_vlan(rxf))
1282 return 1;
1283
1284 return 0;
1285}
1286
1287static int
1288rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
1289{
1290 struct bna_mac *mac = NULL;
1291 struct list_head *qe;
1292
1293 /* 3. delete pending mcast entries */
1294 if (!list_empty(&rxf->mcast_pending_del_q)) {
1295 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1296 bfa_q_qe_init(qe);
1297 mac = (struct bna_mac *)qe;
1298 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1299 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1300 return 1;
1301 }
1302
1303 /* 4. clear active mcast entries; move them to pending_add_q */
1304 if (!list_empty(&rxf->mcast_active_q)) {
1305 bfa_q_deq(&rxf->mcast_active_q, &qe);
1306 bfa_q_qe_init(qe);
1307 mac = (struct bna_mac *)qe;
1308 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1309 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1310 return 1;
1311 }
1312
1313 return 0;
1314}
1315
1316/**
1317 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
1318 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
1319 * so that they are added to CAM again in the rxf start path. Moves the current
 1320 * filter settings - promiscuous, default, allmulti - to pending filter
1321 * configuration
1322 */
1323static int
1324rxf_clear_packet_filter(struct bna_rxf *rxf)
1325{
1326 if (rxf_clear_packet_filter_ucast(rxf))
1327 return 1;
1328
1329 if (rxf_clear_packet_filter_mcast(rxf))
1330 return 1;
1331
1332 /* 5. clear active default MAC in the CAM */
1333 if (rxf->ucast_pending_set > 0)
1334 rxf->ucast_pending_set = 0;
1335
1336 if (rxf_clear_packet_filter_promisc(rxf))
1337 return 1;
1338
1339 if (rxf_clear_packet_filter_allmulti(rxf))
1340 return 1;
1341
1342 return 0;
1343}
1344
1345static void
1346rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
1347{
1348 struct list_head *qe;
1349 struct bna_mac *mac;
1350
1351 /* 3. Move active mcast entries to pending_add_q */
1352 while (!list_empty(&rxf->mcast_active_q)) {
1353 bfa_q_deq(&rxf->mcast_active_q, &qe);
1354 bfa_q_qe_init(qe);
1355 list_add_tail(qe, &rxf->mcast_pending_add_q);
1356 }
1357
1358 /* 4. Throw away delete pending mcast entries */
1359 while (!list_empty(&rxf->mcast_pending_del_q)) {
1360 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1361 bfa_q_qe_init(qe);
1362 mac = (struct bna_mac *)qe;
1363 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1364 }
1365}
1366
1367/**
1368 * In the rxf fail path, throws away the ucast/mcast entries pending for
1369 * deletion, moves all active ucast/mcast entries to pending queue so that
1370 * they are added back to CAM in the rxf start path. Also moves the current
1371 * filter configuration to pending filter configuration.
1372 */
1373static void
1374rxf_reset_packet_filter(struct bna_rxf *rxf)
1375{
1376 rxf_reset_packet_filter_ucast(rxf);
1377
1378 rxf_reset_packet_filter_mcast(rxf);
1379
1380 /* 5. Turn off ucast set flag */
1381 rxf->ucast_pending_set = 0;
1382
1383 rxf_reset_packet_filter_promisc(rxf);
1384
1385 rxf_reset_packet_filter_allmulti(rxf);
1386}
1387
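/**
 * One-time RxF setup during Rx creation: initialize the ucast/mcast CAM
 * queues, record VLAN-strip and paused state, acquire a RIT segment for
 * num_paths, select the multicast RxQ and start the FSM in stopped state.
 */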
1388static void
1389bna_rxf_init(struct bna_rxf *rxf,
1390 struct bna_rx *rx,
1391 struct bna_rx_config *q_config)
1392{
1393 struct list_head *qe;
1394 struct bna_rxp *rxp;
1395
1396 /* rxf_id is initialized during rx_mod init */
1397 rxf->rx = rx;
1398
1399 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
1400 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
1401 rxf->ucast_pending_set = 0;
1402 INIT_LIST_HEAD(&rxf->ucast_active_q);
1403 rxf->ucast_active_mac = NULL;
1404
1405 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
1406 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
1407 INIT_LIST_HEAD(&rxf->mcast_active_q);
1408
1409 bfa_q_qe_init(&rxf->mbox_qe.qe);
1410
1411 if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
1412 rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
1413
1414 rxf->rxf_oper_state = (q_config->paused) ?
1415 BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
1416
1417 bna_rxf_adv_init(rxf, rx, q_config);
1418
1419 rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
1420 q_config->num_paths);
1421
1422 list_for_each(qe, &rx->rxp_q) {
1423 rxp = (struct bna_rxp *)qe;
1424 if (q_config->rxp_type == BNA_RXP_SINGLE)
1425 rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
1426 else
1427 rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
1428 break;
1429 }
1430
1431 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
1432 memset(rxf->vlan_filter_table, 0,
1433 (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
1434
1435 /* Set up VLAN 0 for pure priority tagged packets */
1436 rxf->vlan_filter_table[0] |= 1;
1437
1438 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
1439}
1440
1441static void
1442bna_rxf_uninit(struct bna_rxf *rxf)
1443{
1444 struct bna *bna = rxf->rx->bna;
1445 struct bna_mac *mac;
1446
1447 bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
1448 rxf->rit_segment = NULL;
1449
1450 rxf->ucast_pending_set = 0;
1451
1452 while (!list_empty(&rxf->ucast_pending_add_q)) {
1453 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
1454 bfa_q_qe_init(&mac->qe);
1455 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1456 }
1457
1458 if (rxf->ucast_active_mac) {
1459 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1460 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
1461 rxf->ucast_active_mac);
1462 rxf->ucast_active_mac = NULL;
1463 }
1464
1465 while (!list_empty(&rxf->mcast_pending_add_q)) {
1466 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
1467 bfa_q_qe_init(&mac->qe);
1468 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1469 }
1470
1471 /* Turn off pending promisc mode */
1472 if (is_promisc_enable(rxf->rxmode_pending,
1473 rxf->rxmode_pending_bitmask)) {
1474 /* system promisc state should be pending */
1475 BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
1476 promisc_inactive(rxf->rxmode_pending,
1477 rxf->rxmode_pending_bitmask);
1478 bna->rxf_promisc_id = BFI_MAX_RXF;
1479 }
1480 /* Promisc mode should not be active */
1481 BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
1482
1483 /* Turn off pending all-multi mode */
1484 if (is_allmulti_enable(rxf->rxmode_pending,
1485 rxf->rxmode_pending_bitmask)) {
1486 allmulti_inactive(rxf->rxmode_pending,
1487 rxf->rxmode_pending_bitmask);
1488 }
1489 /* Allmulti mode should not be active */
1490 BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
1491
1492 rxf->rx = NULL;
1493}
1494
1495static void
1496bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
1497{
1498 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
1499 if (rx->rxf.rxf_id < 32)
1500 rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
1501 else
1502 rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
1503 1 << (rx->rxf.rxf_id - 32));
1504}
1505
1506static void
1507bna_rxf_start(struct bna_rxf *rxf)
1508{
1509 rxf->start_cbfn = bna_rx_cb_rxf_started;
1510 rxf->start_cbarg = rxf->rx;
1511 rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
1512 bfa_fsm_send_event(rxf, RXF_E_START);
1513}
1514
1515static void
1516bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
1517{
1518 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
 1519	if (rx->rxf.rxf_id < 32)
 1520		rx->bna->rx_mod.rxf_bmap[0] &= ~((u32)1 << rx->rxf.rxf_id);
 1521	else
 1522		rx->bna->rx_mod.rxf_bmap[1] &= ~((u32)
 1523				1 << (rx->rxf.rxf_id - 32));
1524}
1525
1526static void
1527bna_rxf_stop(struct bna_rxf *rxf)
1528{
1529 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
1530 rxf->stop_cbarg = rxf->rx;
1531 bfa_fsm_send_event(rxf, RXF_E_STOP);
1532}
1533
1534static void
1535bna_rxf_fail(struct bna_rxf *rxf)
1536{
1537 rxf->rxf_flags |= BNA_RXF_FL_FAILED;
1538 bfa_fsm_send_event(rxf, RXF_E_FAIL);
1539}
1540
1541int
1542bna_rxf_state_get(struct bna_rxf *rxf)
1543{
1544 return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
1545}
1546
1547enum bna_cb_status
1548bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
1549 void (*cbfn)(struct bnad *, struct bna_rx *,
1550 enum bna_cb_status))
1551{
1552 struct bna_rxf *rxf = &rx->rxf;
1553
1554 if (rxf->ucast_active_mac == NULL) {
1555 rxf->ucast_active_mac =
1556 bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
1557 if (rxf->ucast_active_mac == NULL)
1558 return BNA_CB_UCAST_CAM_FULL;
1559 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1560 }
1561
1562 memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
1563 rxf->ucast_pending_set++;
1564 rxf->cam_fltr_cbfn = cbfn;
1565 rxf->cam_fltr_cbarg = rx->bna->bnad;
1566
1567 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1568
1569 return BNA_CB_SUCCESS;
1570}
1571
1572enum bna_cb_status
1573bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
1574 void (*cbfn)(struct bnad *, struct bna_rx *,
1575 enum bna_cb_status))
1576{
1577 struct bna_rxf *rxf = &rx->rxf;
1578 struct list_head *qe;
1579 struct bna_mac *mac;
1580
1581 /* Check if already added */
1582 list_for_each(qe, &rxf->mcast_active_q) {
1583 mac = (struct bna_mac *)qe;
1584 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1585 if (cbfn)
1586 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1587 return BNA_CB_SUCCESS;
1588 }
1589 }
1590
1591 /* Check if pending addition */
1592 list_for_each(qe, &rxf->mcast_pending_add_q) {
1593 mac = (struct bna_mac *)qe;
1594 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1595 if (cbfn)
1596 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1597 return BNA_CB_SUCCESS;
1598 }
1599 }
1600
1601 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1602 if (mac == NULL)
1603 return BNA_CB_MCAST_LIST_FULL;
1604 bfa_q_qe_init(&mac->qe);
1605 memcpy(mac->addr, addr, ETH_ALEN);
1606 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1607
1608 rxf->cam_fltr_cbfn = cbfn;
1609 rxf->cam_fltr_cbarg = rx->bna->bnad;
1610
1611 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1612
1613 return BNA_CB_SUCCESS;
1614}
1615
1616enum bna_cb_status
1617bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
1618 void (*cbfn)(struct bnad *, struct bna_rx *,
1619 enum bna_cb_status))
1620{
1621 struct bna_rxf *rxf = &rx->rxf;
1622 struct list_head list_head;
1623 struct list_head *qe;
1624 u8 *mcaddr;
1625 struct bna_mac *mac;
1626 struct bna_mac *mac1;
1627 int skip;
1628 int delete;
1629 int need_hw_config = 0;
1630 int i;
1631
1632 /* Allocate nodes */
1633 INIT_LIST_HEAD(&list_head);
1634 for (i = 0, mcaddr = mclist; i < count; i++) {
1635 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1636 if (mac == NULL)
1637 goto err_return;
1638 bfa_q_qe_init(&mac->qe);
1639 memcpy(mac->addr, mcaddr, ETH_ALEN);
1640 list_add_tail(&mac->qe, &list_head);
1641
1642 mcaddr += ETH_ALEN;
1643 }
1644
1645 /* Schedule for addition */
1646 while (!list_empty(&list_head)) {
1647 bfa_q_deq(&list_head, &qe);
1648 mac = (struct bna_mac *)qe;
1649 bfa_q_qe_init(&mac->qe);
1650
1651 skip = 0;
1652
1653 /* Skip if already added */
1654 list_for_each(qe, &rxf->mcast_active_q) {
1655 mac1 = (struct bna_mac *)qe;
1656 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1657 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1658 mac);
1659 skip = 1;
1660 break;
1661 }
1662 }
1663
1664 if (skip)
1665 continue;
1666
1667 /* Skip if pending addition */
1668 list_for_each(qe, &rxf->mcast_pending_add_q) {
1669 mac1 = (struct bna_mac *)qe;
1670 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1671 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1672 mac);
1673 skip = 1;
1674 break;
1675 }
1676 }
1677
1678 if (skip)
1679 continue;
1680
1681 need_hw_config = 1;
1682 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1683 }
1684
1685 /**
1686 * Delete the entries that are in the pending_add_q but not
1687 * in the new list
1688 */
1689 while (!list_empty(&rxf->mcast_pending_add_q)) {
1690 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1691 mac = (struct bna_mac *)qe;
1692 bfa_q_qe_init(&mac->qe);
1693 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1694 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1695 delete = 0;
1696 break;
1697 }
1698 mcaddr += ETH_ALEN;
1699 }
1700 if (delete)
1701 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1702 else
1703 list_add_tail(&mac->qe, &list_head);
1704 }
1705 while (!list_empty(&list_head)) {
1706 bfa_q_deq(&list_head, &qe);
1707 mac = (struct bna_mac *)qe;
1708 bfa_q_qe_init(&mac->qe);
1709 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1710 }
1711
1712 /**
1713 * Schedule entries for deletion that are in the active_q but not
1714 * in the new list
1715 */
1716 while (!list_empty(&rxf->mcast_active_q)) {
1717 bfa_q_deq(&rxf->mcast_active_q, &qe);
1718 mac = (struct bna_mac *)qe;
1719 bfa_q_qe_init(&mac->qe);
1720 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1721 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1722 delete = 0;
1723 break;
1724 }
1725 mcaddr += ETH_ALEN;
1726 }
1727 if (delete) {
1728 list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
1729 need_hw_config = 1;
1730 } else {
1731 list_add_tail(&mac->qe, &list_head);
1732 }
1733 }
1734 while (!list_empty(&list_head)) {
1735 bfa_q_deq(&list_head, &qe);
1736 mac = (struct bna_mac *)qe;
1737 bfa_q_qe_init(&mac->qe);
1738 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1739 }
1740
1741 if (need_hw_config) {
1742 rxf->cam_fltr_cbfn = cbfn;
1743 rxf->cam_fltr_cbarg = rx->bna->bnad;
1744 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1745 } else if (cbfn)
1746 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1747
1748 return BNA_CB_SUCCESS;
1749
1750err_return:
1751 while (!list_empty(&list_head)) {
1752 bfa_q_deq(&list_head, &qe);
1753 mac = (struct bna_mac *)qe;
1754 bfa_q_qe_init(&mac->qe);
1755 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1756 }
1757
1758 return BNA_CB_MCAST_LIST_FULL;
1759}
1760
1761void
1762bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1763{
1764 struct bna_rxf *rxf = &rx->rxf;
1765 int index = (vlan_id >> 5);
1766 int bit = (1 << (vlan_id & 0x1F));
1767
1768 rxf->vlan_filter_table[index] |= bit;
1769 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1770 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1771 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1772 }
1773}
1774
1775void
1776bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1777{
1778 struct bna_rxf *rxf = &rx->rxf;
1779 int index = (vlan_id >> 5);
1780 int bit = (1 << (vlan_id & 0x1F));
1781
1782 rxf->vlan_filter_table[index] &= ~bit;
1783 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1784 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1785 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1786 }
1787}
1788
1789/**
1790 * RX
1791 */
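/*
 * Initialize a receive control block: reset the indices, attach the unmap
 * queue, CQ and bnad back-pointers, and derive the RxQ doorbell address
 * from the doorbell qset layout.
 */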
1792#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
1793 struct bna_doorbell_qset *_qset; \
1794 unsigned long off; \
1795 (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
1796 (q)->rcb->q_depth = (qdepth); \
1797 (q)->rcb->unmap_q = unmapq_mem; \
1798 (q)->rcb->rxq = (q); \
1799 (q)->rcb->cq = &(rxp)->cq; \
1800 (q)->rcb->bnad = (bna)->bnad; \
1801 _qset = (struct bna_doorbell_qset *)0; \
1802 off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
1803 (q)->rcb->q_dbell = off + \
1804 BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
1805 (q)->rcb->id = _id; \
1806} while (0)
1807
1808#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1809 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1810
1811#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1812 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1813
1814#define call_rx_stop_callback(rx, status) \
1815 if ((rx)->stop_cbfn) { \
1816 (*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
1817 (rx)->stop_cbfn = NULL; \
1818 (rx)->stop_cbarg = NULL; \
1819 }
1820
1821/*
 1822 * Since rx_enable is a synchronous callback, no start_cbfn is required.
1823 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
1824 * for each rxpath.
1825 */
1826
1827#define call_rx_disable_cbfn(rx, status) \
1828 if ((rx)->disable_cbfn) { \
1829 (*(rx)->disable_cbfn)((rx)->disable_cbarg, \
1830 status); \
1831 (rx)->disable_cbfn = NULL; \
1832 (rx)->disable_cbarg = NULL; \
1833 } \
1834
1835#define rxqs_reqd(type, num_rxqs) \
1836 (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
1837
1838#define rx_ib_fail(rx) \
1839do { \
1840 struct bna_rxp *rxp; \
1841 struct list_head *qe; \
1842 list_for_each(qe, &(rx)->rxp_q) { \
1843 rxp = (struct bna_rxp *)qe; \
1844 bna_ib_fail(rxp->cq.ib); \
1845 } \
1846} while (0)
1847
1848static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
1849static void __bna_rxq_start(struct bna_rxq *rxq);
1850static void __bna_cq_start(struct bna_cq *cq);
1851static void bna_rit_create(struct bna_rx *rx);
1852static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
1853static void bna_rx_cb_rxq_stopped_all(void *arg);
1854
1855bfa_fsm_state_decl(bna_rx, stopped,
1856 struct bna_rx, enum bna_rx_event);
1857bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1858 struct bna_rx, enum bna_rx_event);
1859bfa_fsm_state_decl(bna_rx, started,
1860 struct bna_rx, enum bna_rx_event);
1861bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1862 struct bna_rx, enum bna_rx_event);
1863bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
1864 struct bna_rx, enum bna_rx_event);
1865
1866static const struct bfa_sm_table rx_sm_table[] = {
1867 {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
1868 {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
1869 {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
1870 {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
1871 {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
1872};
1873
1874static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1875{
1876 struct bna_rxp *rxp;
1877 struct list_head *qe_rxp;
1878
1879 list_for_each(qe_rxp, &rx->rxp_q) {
1880 rxp = (struct bna_rxp *)qe_rxp;
1881 rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
1882 }
1883
1884 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1885}
1886
1887static void bna_rx_sm_stopped(struct bna_rx *rx,
1888 enum bna_rx_event event)
1889{
1890 switch (event) {
1891 case RX_E_START:
1892 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1893 break;
1894 case RX_E_STOP:
1895 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1896 break;
1897 case RX_E_FAIL:
1898 /* no-op */
1899 break;
1900 default:
1901 bfa_sm_fault(event);
1902 break;
1903 }
1904
1905}
1906
1907static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1908{
1909 struct bna_rxp *rxp;
1910 struct list_head *qe_rxp;
1911 struct bna_rxq *q0 = NULL, *q1 = NULL;
1912
1913 /* Setup the RIT */
1914 bna_rit_create(rx);
1915
1916 list_for_each(qe_rxp, &rx->rxp_q) {
1917 rxp = (struct bna_rxp *)qe_rxp;
1918 bna_ib_start(rxp->cq.ib);
1919 GET_RXQS(rxp, q0, q1);
1920 q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
1921 __bna_rxq_start(q0);
1922 rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
1923 if (q1) {
1924 __bna_rxq_start(q1);
1925 rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
1926 }
1927 __bna_cq_start(&rxp->cq);
1928 }
1929
1930 bna_rxf_start(&rx->rxf);
1931}
1932
1933static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1934 enum bna_rx_event event)
1935{
1936 switch (event) {
1937 case RX_E_STOP:
1938 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1939 break;
1940 case RX_E_FAIL:
1941 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1942 rx_ib_fail(rx);
1943 bna_rxf_fail(&rx->rxf);
1944 break;
1945 case RX_E_RXF_STARTED:
1946 bfa_fsm_set_state(rx, bna_rx_sm_started);
1947 break;
1948 default:
1949 bfa_sm_fault(event);
1950 break;
1951 }
1952}
1953
1954void
1955bna_rx_sm_started_entry(struct bna_rx *rx)
1956{
1957 struct bna_rxp *rxp;
1958 struct list_head *qe_rxp;
1959
1960 /* Start IB */
1961 list_for_each(qe_rxp, &rx->rxp_q) {
1962 rxp = (struct bna_rxp *)qe_rxp;
1963 bna_ib_ack(&rxp->cq.ib->door_bell, 0);
1964 }
1965
1966 bna_llport_rx_started(&rx->bna->port.llport);
1967}
1968
1969void
1970bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1971{
1972 switch (event) {
1973 case RX_E_FAIL:
1974 bna_llport_rx_stopped(&rx->bna->port.llport);
1975 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1976 rx_ib_fail(rx);
1977 bna_rxf_fail(&rx->rxf);
1978 break;
1979 case RX_E_STOP:
1980 bna_llport_rx_stopped(&rx->bna->port.llport);
1981 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1982 break;
1983 default:
1984 bfa_sm_fault(event);
1985 break;
1986 }
1987}
1988
1989void
1990bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1991{
1992 bna_rxf_stop(&rx->rxf);
1993}
1994
1995void
1996bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1997{
1998 switch (event) {
1999 case RX_E_RXF_STOPPED:
2000 bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
2001 break;
2002 case RX_E_RXF_STARTED:
2003 /**
2004 * RxF was in the process of starting up when
2005 * RXF_E_STOP was issued. Ignore this event
2006 */
2007 break;
2008 case RX_E_FAIL:
2009 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2010 rx_ib_fail(rx);
2011 bna_rxf_fail(&rx->rxf);
2012 break;
2013 default:
2014 bfa_sm_fault(event);
2015 break;
2016 }
2017
2018}
2019
2020void
2021bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
2022{
2023 struct bna_rxp *rxp = NULL;
2024 struct bna_rxq *q0 = NULL;
2025 struct bna_rxq *q1 = NULL;
2026 struct list_head *qe;
2027 u32 rxq_mask[2] = {0, 0};
2028
2029 /* Only one call to multi-rxq-stop for all RXPs in this RX */
2030 bfa_wc_up(&rx->rxq_stop_wc);
2031 list_for_each(qe, &rx->rxp_q) {
2032 rxp = (struct bna_rxp *)qe;
2033 GET_RXQS(rxp, q0, q1);
2034 if (q0->rxq_id < 32)
2035 rxq_mask[0] |= ((u32)1 << q0->rxq_id);
2036 else
2037 rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
2038 if (q1) {
2039 if (q1->rxq_id < 32)
2040 rxq_mask[0] |= ((u32)1 << q1->rxq_id);
2041 else
2042 rxq_mask[1] |= ((u32)
2043 1 << (q1->rxq_id - 32));
2044 }
2045 }
2046
2047 __bna_multi_rxq_stop(rxp, rxq_mask);
2048}
2049
2050void
2051bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
2052{
2053 struct bna_rxp *rxp = NULL;
2054 struct list_head *qe;
2055
2056 switch (event) {
2057 case RX_E_RXQ_STOPPED:
2058 list_for_each(qe, &rx->rxp_q) {
2059 rxp = (struct bna_rxp *)qe;
2060 bna_ib_stop(rxp->cq.ib);
2061 }
2062 /* Fall through */
2063 case RX_E_FAIL:
2064 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2065 break;
2066 default:
2067 bfa_sm_fault(event);
2068 break;
2069 }
2070}
2071
2072void
 2073__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
2074{
2075 struct bfi_ll_q_stop_req ll_req;
2076
2077 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
2078 ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
2079 ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
2080 bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
2081 bna_rx_cb_multi_rxq_stopped, rxp);
2082 bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
2083}
2084
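/**
 * Program the RxQ's queue page table and configuration into the HQM RxTx
 * queue RAM, then reset the producer/consumer indices.
 */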
2085void
2086__bna_rxq_start(struct bna_rxq *rxq)
2087{
2088 struct bna_rxtx_q_mem *q_mem;
2089 struct bna_rxq_mem rxq_cfg, *rxq_mem;
2090 struct bna_dma_addr cur_q_addr;
2091 /* struct bna_doorbell_qset *qset; */
2092 struct bna_qpt *qpt;
2093 u32 pg_num;
2094 struct bna *bna = rxq->rx->bna;
2095 void __iomem *base_addr;
2096 unsigned long off;
2097
2098 qpt = &rxq->qpt;
2099 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2100
2101 rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2102 rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2103 rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2104 rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2105
2106 rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
2107 rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
2108 (qpt->page_size >> 2);
2109 rxq_cfg.sg_n_cq_n_cns_ptr =
2110 ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
2111 rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
2112 BNA_Q_IDLE_STATE;
2113 rxq_cfg.next_qid = 0x0 | (0x3 << 8);
2114
2115 /* Write the page number register */
2116 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2117 HQM_RXTX_Q_RAM_BASE_OFFSET);
2118 writel(pg_num, bna->regs.page_addr);
2119
2120 /* Write to h/w */
2121 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2122 HQM_RXTX_Q_RAM_BASE_OFFSET);
2123
2124 q_mem = (struct bna_rxtx_q_mem *)0;
2125 rxq_mem = &q_mem[rxq->rxq_id].rxq;
2126
2127 off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
2128 writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
2129
2130 off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
2131 writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
2132
2133 off = (unsigned long)&rxq_mem->cur_q_entry_lo;
2134 writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
2135
2136 off = (unsigned long)&rxq_mem->cur_q_entry_hi;
2137 writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
2138
2139 off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
2140 writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2141
2142 off = (unsigned long)&rxq_mem->entry_n_pg_size;
2143 writel(rxq_cfg.entry_n_pg_size, base_addr + off);
2144
2145 off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
2146 writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
2147
2148 off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
2149 writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
2150
2151 off = (unsigned long)&rxq_mem->next_qid;
2152 writel(rxq_cfg.next_qid, base_addr + off);
2153
2154 rxq->rcb->producer_index = 0;
2155 rxq->rcb->consumer_index = 0;
2156}
2157
2158void
2159__bna_cq_start(struct bna_cq *cq)
2160{
2161 struct bna_cq_mem cq_cfg, *cq_mem;
2162 const struct bna_qpt *qpt;
2163 struct bna_dma_addr cur_q_addr;
2164 u32 pg_num;
2165 struct bna *bna = cq->rx->bna;
2166 void __iomem *base_addr;
2167 unsigned long off;
2168
2169 qpt = &cq->qpt;
2170 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2171
2172 /*
2173 * Fill out structure, to be subsequently written
2174 * to hardware
2175 */
2176 cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2177 cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2178 cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2179 cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2180
2181 cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
2182 cq_cfg.entry_n_pg_size =
2183 ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
2184 cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
2185 ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
2186 cq_cfg.q_state = BNA_Q_IDLE_STATE;
2187
2188 /* Write the page number register */
2189 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2190 HQM_CQ_RAM_BASE_OFFSET);
2191
2192 writel(pg_num, bna->regs.page_addr);
2193
2194 /* H/W write */
2195 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2196 HQM_CQ_RAM_BASE_OFFSET);
2197
2198 cq_mem = (struct bna_cq_mem *)0;
2199
2200 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
2201 writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
2202
2203 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
2204 writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
2205
2206 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
2207 writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
2208
2209 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
2210 writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
2211
2212 off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
2213 writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2214
2215 off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
2216 writel(cq_cfg.entry_n_pg_size, base_addr + off);
2217
2218 off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
2219 writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
2220
2221 off = (unsigned long)&cq_mem[cq->cq_id].q_state;
2222 writel(cq_cfg.q_state, base_addr + off);
2223
2224 cq->ccb->producer_index = 0;
2225 *(cq->ccb->hw_producer_index) = 0;
2226}
2227
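/* Fill the RxF's RIT segment with one large/small RxQ id pair per Rx path */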
2228void
2229bna_rit_create(struct bna_rx *rx)
2230{
2231 struct list_head *qe_rxp;
2232 struct bna_rxp *rxp;
2233 struct bna_rxq *q0 = NULL;
2234 struct bna_rxq *q1 = NULL;
2235 int offset;
2236
2237 offset = 0;
2238 list_for_each(qe_rxp, &rx->rxp_q) {
2239 rxp = (struct bna_rxp *)qe_rxp;
2240 GET_RXQS(rxp, q0, q1);
2241 rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
2242 rx->rxf.rit_segment->rit[offset].small_rxq_id =
2243 (q1 ? q1->rxq_id : 0);
2244 offset++;
2245 }
2246}
2247
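/* Check that enough free Rx, RxP, RxQ and RIT resources exist for rx_cfg */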
2248static int
2249_rx_can_satisfy(struct bna_rx_mod *rx_mod,
2250 struct bna_rx_config *rx_cfg)
2251{
2252 if ((rx_mod->rx_free_count == 0) ||
2253 (rx_mod->rxp_free_count == 0) ||
2254 (rx_mod->rxq_free_count == 0))
2255 return 0;
2256
2257 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
2258 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2259 (rx_mod->rxq_free_count < rx_cfg->num_paths))
2260 return 0;
2261 } else {
2262 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2263 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
2264 return 0;
2265 }
2266
2267 if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
2268 return 0;
2269
2270 return 1;
2271}
2272
2273static struct bna_rxq *
2274_get_free_rxq(struct bna_rx_mod *rx_mod)
2275{
2276 struct bna_rxq *rxq = NULL;
2277 struct list_head *qe = NULL;
2278
2279 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
2280 if (qe) {
2281 rx_mod->rxq_free_count--;
2282 rxq = (struct bna_rxq *)qe;
2283 }
2284 return rxq;
2285}
2286
2287static void
2288_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
2289{
2290 bfa_q_qe_init(&rxq->qe);
2291 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
2292 rx_mod->rxq_free_count++;
2293}
2294
2295static struct bna_rxp *
2296_get_free_rxp(struct bna_rx_mod *rx_mod)
2297{
2298 struct list_head *qe = NULL;
2299 struct bna_rxp *rxp = NULL;
2300
2301 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
2302 if (qe) {
2303 rx_mod->rxp_free_count--;
2304
2305 rxp = (struct bna_rxp *)qe;
2306 }
2307
2308 return rxp;
2309}
2310
2311static void
2312_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
2313{
2314 bfa_q_qe_init(&rxp->qe);
2315 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
2316 rx_mod->rxp_free_count++;
2317}
2318
2319static struct bna_rx *
2320_get_free_rx(struct bna_rx_mod *rx_mod)
2321{
2322 struct list_head *qe = NULL;
2323 struct bna_rx *rx = NULL;
2324
2325 bfa_q_deq(&rx_mod->rx_free_q, &qe);
2326 if (qe) {
2327 rx_mod->rx_free_count--;
2328
2329 rx = (struct bna_rx *)qe;
2330 bfa_q_qe_init(qe);
2331 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2332 }
2333
2334 return rx;
2335}
2336
2337static void
2338_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2339{
2340 bfa_q_qe_init(&rx->qe);
2341 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2342 rx_mod->rx_free_count++;
2343}
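/*
 * Illustrative sketch, not part of the driver: the _get_free_*() /
 * _put_free_*() helpers above implement a simple free pool over list
 * heads, with a running count.  A minimal version of the same pattern,
 * using hypothetical names and the standard list API:
 */
struct pool_item {
	struct list_head qe;	/* must be first, as in the bna objects */
};

static struct pool_item *pool_get(struct list_head *free_q, int *free_count)
{
	struct pool_item *item = NULL;

	if (!list_empty(free_q)) {
		item = list_first_entry(free_q, struct pool_item, qe);
		list_del(&item->qe);
		(*free_count)--;
	}
	return item;
}

static void pool_put(struct list_head *free_q, int *free_count,
		     struct pool_item *item)
{
	list_add_tail(&item->qe, free_q);
	(*free_count)++;
}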
2344
2345static void
2346_rx_init(struct bna_rx *rx, struct bna *bna)
2347{
2348 rx->bna = bna;
2349 rx->rx_flags = 0;
2350
2351 INIT_LIST_HEAD(&rx->rxp_q);
2352
2353 rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
2354 rx->rxq_stop_wc.wc_cbarg = rx;
2355 rx->rxq_stop_wc.wc_count = 0;
2356
2357 rx->stop_cbfn = NULL;
2358 rx->stop_cbarg = NULL;
2359}
2360
2361static void
2362_rxp_add_rxqs(struct bna_rxp *rxp,
2363 struct bna_rxq *q0,
2364 struct bna_rxq *q1)
2365{
2366 switch (rxp->type) {
2367 case BNA_RXP_SINGLE:
2368 rxp->rxq.single.only = q0;
2369 rxp->rxq.single.reserved = NULL;
2370 break;
2371 case BNA_RXP_SLR:
2372 rxp->rxq.slr.large = q0;
2373 rxp->rxq.slr.small = q1;
2374 break;
2375 case BNA_RXP_HDS:
2376 rxp->rxq.hds.data = q0;
2377 rxp->rxq.hds.hdr = q1;
2378 break;
2379 default:
2380 break;
2381 }
2382}
2383
2384static void
2385_rxq_qpt_init(struct bna_rxq *rxq,
2386 struct bna_rxp *rxp,
2387 u32 page_count,
2388 u32 page_size,
2389 struct bna_mem_descr *qpt_mem,
2390 struct bna_mem_descr *swqpt_mem,
2391 struct bna_mem_descr *page_mem)
2392{
2393 int i;
2394
2395 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2396 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2397 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2398 rxq->qpt.page_count = page_count;
2399 rxq->qpt.page_size = page_size;
2400
2401 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2402
2403 for (i = 0; i < rxq->qpt.page_count; i++) {
2404 rxq->rcb->sw_qpt[i] = page_mem[i].kva;
2405 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2406 page_mem[i].dma.lsb;
2407 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2408 page_mem[i].dma.msb;
2409
2410 }
2411}
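/*
 * Illustrative sketch, not part of the driver: the QPT filled in above is
 * a page table -- entry i carries the DMA address of page i for the
 * hardware, while sw_qpt[i] keeps the matching kernel virtual address for
 * the driver.  Locating queue entry "idx" is then plain page arithmetic
 * (hypothetical helper, assuming entries never straddle a page):
 */
static void *qpt_entry(void **sw_qpt, u32 page_size, u32 entry_size, u32 idx)
{
	u32 entries_per_page = page_size / entry_size;

	return (u8 *)sw_qpt[idx / entries_per_page] +
		(idx % entries_per_page) * entry_size;
}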
2412
2413static void
2414_rxp_cqpt_setup(struct bna_rxp *rxp,
2415 u32 page_count,
2416 u32 page_size,
2417 struct bna_mem_descr *qpt_mem,
2418 struct bna_mem_descr *swqpt_mem,
2419 struct bna_mem_descr *page_mem)
2420{
2421 int i;
2422
2423 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2424 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2425 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2426 rxp->cq.qpt.page_count = page_count;
2427 rxp->cq.qpt.page_size = page_size;
2428
2429 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2430
2431 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2432 rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
2433
2434 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2435 page_mem[i].dma.lsb;
2436 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2437 page_mem[i].dma.msb;
2438
2439 }
2440}
2441
2442static void
2443_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
2444{
2445 list_add_tail(&rxp->qe, &rx->rxp_q);
2446}
2447
2448static void
2449_init_rxmod_queues(struct bna_rx_mod *rx_mod)
2450{
2451 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2452 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2453 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2454 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2455
2456 rx_mod->rx_free_count = 0;
2457 rx_mod->rxq_free_count = 0;
2458 rx_mod->rxp_free_count = 0;
2459}
2460
2461static void
2462_rx_ctor(struct bna_rx *rx, int id)
2463{
2464 bfa_q_qe_init(&rx->qe);
2465 INIT_LIST_HEAD(&rx->rxp_q);
2466 rx->bna = NULL;
2467
2468 rx->rxf.rxf_id = id;
2469
2470 /* FIXME: mbox_qe ctor()?? */
2471 bfa_q_qe_init(&rx->mbox_qe.qe);
2472
2473 rx->stop_cbfn = NULL;
2474 rx->stop_cbarg = NULL;
2475}
2476
2477void
2478bna_rx_cb_multi_rxq_stopped(void *arg, int status)
2479{
2480 struct bna_rxp *rxp = (struct bna_rxp *)arg;
2481
2482 bfa_wc_down(&rxp->rx->rxq_stop_wc);
2483}
2484
2485void
2486bna_rx_cb_rxq_stopped_all(void *arg)
2487{
2488 struct bna_rx *rx = (struct bna_rx *)arg;
2489
2490 bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
2491}
2492
2493static void
2494bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
2495 enum bna_cb_status status)
2496{
2497 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2498
2499 bfa_wc_down(&rx_mod->rx_stop_wc);
2500}
2501
2502static void
2503bna_rx_mod_cb_rx_stopped_all(void *arg)
2504{
2505 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2506
2507 if (rx_mod->stop_cbfn)
2508 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2509 rx_mod->stop_cbfn = NULL;
2510}
2511
2512static void
2513bna_rx_start(struct bna_rx *rx)
2514{
2515 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2516 if (rx->rx_flags & BNA_RX_F_ENABLE)
2517 bfa_fsm_send_event(rx, RX_E_START);
2518}
2519
2520static void
2521bna_rx_stop(struct bna_rx *rx)
2522{
2523 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2524 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2525 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
2526 else {
2527 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2528 rx->stop_cbarg = &rx->bna->rx_mod;
2529 bfa_fsm_send_event(rx, RX_E_STOP);
2530 }
2531}
2532
2533static void
2534bna_rx_fail(struct bna_rx *rx)
2535{
2536	/* Indicate that the port is not enabled and has failed */
2537 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2538 rx->rx_flags |= BNA_RX_F_PORT_FAILED;
2539 bfa_fsm_send_event(rx, RX_E_FAIL);
2540}
2541
2542void
2543bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2544{
2545 struct bna_rx *rx;
2546 struct list_head *qe;
2547
2548 rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
2549 if (type == BNA_RX_T_LOOPBACK)
2550 rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
2551
2552 list_for_each(qe, &rx_mod->rx_active_q) {
2553 rx = (struct bna_rx *)qe;
2554 if (rx->type == type)
2555 bna_rx_start(rx);
2556 }
2557}
2558
2559void
2560bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2561{
2562 struct bna_rx *rx;
2563 struct list_head *qe;
2564
2565 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2566 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2567
2568 rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
2569
2570 /**
2571 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
2572 * as we are going to call bna_rx_stop
2573 */
2574 list_for_each(qe, &rx_mod->rx_active_q) {
2575 rx = (struct bna_rx *)qe;
2576 if (rx->type == type)
2577 bfa_wc_up(&rx_mod->rx_stop_wc);
2578 }
2579
2580 if (rx_mod->rx_stop_wc.wc_count == 0) {
2581 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2582 rx_mod->stop_cbfn = NULL;
2583 return;
2584 }
2585
2586 list_for_each(qe, &rx_mod->rx_active_q) {
2587 rx = (struct bna_rx *)qe;
2588 if (rx->type == type)
2589 bna_rx_stop(rx);
2590 }
2591}
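/*
 * Illustrative sketch, not part of the driver: rx_stop_wc is a waiting
 * counter.  It is raised once per asynchronous stop that will be issued,
 * each completion lowers it, and the resume callback fires only when the
 * count drops back to zero.  A minimal version of that pattern, with
 * hypothetical names:
 */
struct wait_counter {
	int count;
	void (*resume)(void *arg);
	void *arg;
};

static void wc_up(struct wait_counter *wc)
{
	wc->count++;
}

static void wc_down(struct wait_counter *wc)
{
	if (--wc->count == 0)
		wc->resume(wc->arg);
}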
2592
2593void
2594bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2595{
2596 struct bna_rx *rx;
2597 struct list_head *qe;
2598
2599 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2600 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2601
2602 list_for_each(qe, &rx_mod->rx_active_q) {
2603 rx = (struct bna_rx *)qe;
2604 bna_rx_fail(rx);
2605 }
2606}
2607
2608void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2609 struct bna_res_info *res_info)
2610{
2611 int index;
2612 struct bna_rx *rx_ptr;
2613 struct bna_rxp *rxp_ptr;
2614 struct bna_rxq *rxq_ptr;
2615
2616 rx_mod->bna = bna;
2617 rx_mod->flags = 0;
2618
2619 rx_mod->rx = (struct bna_rx *)
2620 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2621 rx_mod->rxp = (struct bna_rxp *)
2622 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2623 rx_mod->rxq = (struct bna_rxq *)
2624 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2625
2626 /* Initialize the queues */
2627 _init_rxmod_queues(rx_mod);
2628
2629 /* Build RX queues */
2630 for (index = 0; index < BFI_MAX_RXQ; index++) {
2631 rx_ptr = &rx_mod->rx[index];
2632 _rx_ctor(rx_ptr, index);
2633 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2634 rx_mod->rx_free_count++;
2635 }
2636
2637 /* build RX-path queue */
2638 for (index = 0; index < BFI_MAX_RXQ; index++) {
2639 rxp_ptr = &rx_mod->rxp[index];
2640 rxp_ptr->cq.cq_id = index;
2641 bfa_q_qe_init(&rxp_ptr->qe);
2642 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2643 rx_mod->rxp_free_count++;
2644 }
2645
2646 /* build RXQ queue */
2647 for (index = 0; index < BFI_MAX_RXQ; index++) {
2648 rxq_ptr = &rx_mod->rxq[index];
2649 rxq_ptr->rxq_id = index;
2650
2651 bfa_q_qe_init(&rxq_ptr->qe);
2652 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2653 rx_mod->rxq_free_count++;
2654 }
2655
2656 rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
2657 rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
2658 rx_mod->rx_stop_wc.wc_count = 0;
2659}
2660
2661void
2662bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2663{
2664 struct list_head *qe;
2665 int i;
2666
2667 i = 0;
2668 list_for_each(qe, &rx_mod->rx_free_q)
2669 i++;
2670
2671 i = 0;
2672 list_for_each(qe, &rx_mod->rxp_free_q)
2673 i++;
2674
2675 i = 0;
2676 list_for_each(qe, &rx_mod->rxq_free_q)
2677 i++;
2678
2679 rx_mod->bna = NULL;
2680}
2681
2682int
2683bna_rx_state_get(struct bna_rx *rx)
2684{
2685 return bfa_sm_to_state(rx_sm_table, rx->fsm);
2686}
2687
2688void
2689bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2690{
2691 u32 cq_size, hq_size, dq_size;
2692 u32 cpage_count, hpage_count, dpage_count;
2693 struct bna_mem_info *mem_info;
2694 u32 cq_depth;
2695 u32 hq_depth;
2696 u32 dq_depth;
2697
2698 dq_depth = q_cfg->q_depth;
2699 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2700 cq_depth = dq_depth + hq_depth;
2701
2702 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2703 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2704 cq_size = ALIGN(cq_size, PAGE_SIZE);
2705 cpage_count = SIZE_TO_PAGES(cq_size);
2706
2707 BNA_TO_POWER_OF_2_HIGH(dq_depth);
2708 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2709 dq_size = ALIGN(dq_size, PAGE_SIZE);
2710 dpage_count = SIZE_TO_PAGES(dq_size);
2711
2712 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2713 BNA_TO_POWER_OF_2_HIGH(hq_depth);
2714 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2715 hq_size = ALIGN(hq_size, PAGE_SIZE);
2716 hpage_count = SIZE_TO_PAGES(hq_size);
2717 } else {
2718 hpage_count = 0;
2719 }
2720
2721 /* CCB structures */
2722 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2723 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2724 mem_info->mem_type = BNA_MEM_T_KVA;
2725 mem_info->len = sizeof(struct bna_ccb);
2726 mem_info->num = q_cfg->num_paths;
2727
2728 /* RCB structures */
2729 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2730 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2731 mem_info->mem_type = BNA_MEM_T_KVA;
2732 mem_info->len = sizeof(struct bna_rcb);
2733 mem_info->num = BNA_GET_RXQS(q_cfg);
2734
2735 /* Completion QPT */
2736 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2737 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2738 mem_info->mem_type = BNA_MEM_T_DMA;
2739 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2740 mem_info->num = q_cfg->num_paths;
2741
2742 /* Completion s/w QPT */
2743 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2744 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2745 mem_info->mem_type = BNA_MEM_T_KVA;
2746 mem_info->len = cpage_count * sizeof(void *);
2747 mem_info->num = q_cfg->num_paths;
2748
2749 /* Completion QPT pages */
2750 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2751 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2752 mem_info->mem_type = BNA_MEM_T_DMA;
2753 mem_info->len = PAGE_SIZE;
2754 mem_info->num = cpage_count * q_cfg->num_paths;
2755
2756 /* Data QPTs */
2757 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2758 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2759 mem_info->mem_type = BNA_MEM_T_DMA;
2760 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2761 mem_info->num = q_cfg->num_paths;
2762
2763 /* Data s/w QPTs */
2764 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2765 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2766 mem_info->mem_type = BNA_MEM_T_KVA;
2767 mem_info->len = dpage_count * sizeof(void *);
2768 mem_info->num = q_cfg->num_paths;
2769
2770 /* Data QPT pages */
2771 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2772 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2773 mem_info->mem_type = BNA_MEM_T_DMA;
2774 mem_info->len = PAGE_SIZE;
2775 mem_info->num = dpage_count * q_cfg->num_paths;
2776
2777 /* Hdr QPTs */
2778 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2779 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2780 mem_info->mem_type = BNA_MEM_T_DMA;
2781 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2782 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2783
2784 /* Hdr s/w QPTs */
2785 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2786 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2787 mem_info->mem_type = BNA_MEM_T_KVA;
2788 mem_info->len = hpage_count * sizeof(void *);
2789 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2790
2791 /* Hdr QPT pages */
2792 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2793 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2794 mem_info->mem_type = BNA_MEM_T_DMA;
2795 mem_info->len = (hpage_count ? PAGE_SIZE : 0);
2796 mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
2797
2798 /* RX Interrupts */
2799 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2800 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2801 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2802}
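/*
 * Illustrative sketch, not part of the driver: each queue size above is
 * derived by rounding the depth up to a power of two, multiplying by the
 * work-item size, aligning to a page and counting pages.  A worked
 * example, assuming q_depth == 1000, a 4 KiB PAGE_SIZE and a hypothetical
 * 8-byte work item:
 */
static u32 example_data_q_pages(void)
{
	u32 depth = 1024;		/* 1000 rounded up to a power of 2 */
	u32 size = depth * 8;		/* 8192 bytes of work items */

	size = ALIGN(size, 4096);	/* already a page multiple here */
	return size / 4096;		/* => 2 data pages per path */
}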
2803
2804struct bna_rx *
2805bna_rx_create(struct bna *bna, struct bnad *bnad,
2806 struct bna_rx_config *rx_cfg,
2807 struct bna_rx_event_cbfn *rx_cbfn,
2808 struct bna_res_info *res_info,
2809 void *priv)
2810{
2811 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2812 struct bna_rx *rx;
2813 struct bna_rxp *rxp;
2814 struct bna_rxq *q0;
2815 struct bna_rxq *q1;
2816 struct bna_intr_info *intr_info;
2817 u32 page_count;
2818 struct bna_mem_descr *ccb_mem;
2819 struct bna_mem_descr *rcb_mem;
2820 struct bna_mem_descr *unmapq_mem;
2821 struct bna_mem_descr *cqpt_mem;
2822 struct bna_mem_descr *cswqpt_mem;
2823 struct bna_mem_descr *cpage_mem;
2824 struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
2825 struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
2826 struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
2827 struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
2828 struct bna_mem_descr *hpage_mem; /* hdr page mem */
2829 struct bna_mem_descr *dpage_mem; /* data page mem */
2830 int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
2831 int dpage_count, hpage_count, rcb_idx;
2832 struct bna_ib_config ibcfg;
2833 /* Fail if we don't have enough RXPs, RXQs */
2834 if (!_rx_can_satisfy(rx_mod, rx_cfg))
2835 return NULL;
2836
2837 /* Initialize resource pointers */
2838 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2839 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2840 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2841 unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2842 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2843 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2844 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2845 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2846 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2847 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2848 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2849 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2850 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2851
2852	/* Compute per-path page counts */
2853 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
2854 rx_cfg->num_paths;
2855
2856 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
2857 rx_cfg->num_paths;
2858
2859 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
2860 rx_cfg->num_paths;
2861 /* Get RX pointer */
2862 rx = _get_free_rx(rx_mod);
2863 _rx_init(rx, bna);
2864 rx->priv = priv;
2865 rx->type = rx_cfg->rx_type;
2866
2867 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2868 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2869 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2870 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2871 /* Following callbacks are mandatory */
2872 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2873 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2874
2875 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
2876 switch (rx->type) {
2877 case BNA_RX_T_REGULAR:
2878 if (!(rx->bna->rx_mod.flags &
2879 BNA_RX_MOD_F_PORT_LOOPBACK))
2880 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2881 break;
2882 case BNA_RX_T_LOOPBACK:
2883 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
2884 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2885 break;
2886 }
2887 }
2888
2889 for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
2890 rxp = _get_free_rxp(rx_mod);
2891 rxp->type = rx_cfg->rxp_type;
2892 rxp->rx = rx;
2893 rxp->cq.rx = rx;
2894
2895 /* Get required RXQs, and queue them to rx-path */
2896 q0 = _get_free_rxq(rx_mod);
2897 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2898 q1 = NULL;
2899 else
2900 q1 = _get_free_rxq(rx_mod);
2901
2902 /* Initialize IB */
2903 if (1 == intr_info->num) {
2904 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2905 intr_info->intr_type,
2906 intr_info->idl[0].vector);
2907 rxp->vector = intr_info->idl[0].vector;
2908 } else {
2909 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2910 intr_info->intr_type,
2911 intr_info->idl[i].vector);
2912
2913 /* Map the MSI-x vector used for this RXP */
2914 rxp->vector = intr_info->idl[i].vector;
2915 }
2916
2917 rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
2918
2919 ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2920 ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
2921 ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2922 ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
2923
2924 bna_ib_config(rxp->cq.ib, &ibcfg);
2925
2926 /* Link rxqs to rxp */
2927 _rxp_add_rxqs(rxp, q0, q1);
2928
2929 /* Link rxp to rx */
2930 _rx_add_rxp(rx, rxp);
2931
2932 q0->rx = rx;
2933 q0->rxp = rxp;
2934
2935 /* Initialize RCB for the large / data q */
2936 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2937 RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
2938 (void *)unmapq_mem[rcb_idx].kva);
2939 rcb_idx++;
2940 (q0)->rx_packets = (q0)->rx_bytes = 0;
2941 (q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;
2942
2943 /* Initialize RXQs */
2944 _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
2945 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
2946 q0->rcb->page_idx = dpage_idx;
2947 q0->rcb->page_count = dpage_count;
2948 dpage_idx += dpage_count;
2949
2950 /* Call bnad to complete rcb setup */
2951 if (rx->rcb_setup_cbfn)
2952 rx->rcb_setup_cbfn(bnad, q0->rcb);
2953
2954 if (q1) {
2955 q1->rx = rx;
2956 q1->rxp = rxp;
2957
2958 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2959 RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
2960 (void *)unmapq_mem[rcb_idx].kva);
2961 rcb_idx++;
2962 (q1)->buffer_size = (rx_cfg)->small_buff_size;
2963 (q1)->rx_packets = (q1)->rx_bytes = 0;
2964 (q1)->rx_packets_with_error =
2965 (q1)->rxbuf_alloc_failed = 0;
2966
2967 _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
2968 &hqpt_mem[i], &hsqpt_mem[i],
2969 &hpage_mem[hpage_idx]);
2970 q1->rcb->page_idx = hpage_idx;
2971 q1->rcb->page_count = hpage_count;
2972 hpage_idx += hpage_count;
2973
2974 /* Call bnad to complete rcb setup */
2975 if (rx->rcb_setup_cbfn)
2976 rx->rcb_setup_cbfn(bnad, q1->rcb);
2977 }
2978 /* Setup RXP::CQ */
2979 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2980 _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2981 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
2982 rxp->cq.ccb->page_idx = cpage_idx;
2983 rxp->cq.ccb->page_count = page_count;
2984 cpage_idx += page_count;
2985
2986 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2987 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2988
2989 rxp->cq.ccb->producer_index = 0;
2990 rxp->cq.ccb->q_depth = rx_cfg->q_depth +
2991 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2992 0 : rx_cfg->q_depth);
2993 rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
2994 rxp->cq.ccb->rcb[0] = q0->rcb;
2995 if (q1)
2996 rxp->cq.ccb->rcb[1] = q1->rcb;
2997 rxp->cq.ccb->cq = &rxp->cq;
2998 rxp->cq.ccb->bnad = bna->bnad;
2999 rxp->cq.ccb->hw_producer_index =
3000 ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
3001 (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
3002 *(rxp->cq.ccb->hw_producer_index) = 0;
3003 rxp->cq.ccb->intr_type = intr_info->intr_type;
3004 rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
3005 intr_info->idl[0].vector :
3006 intr_info->idl[i].vector;
3007 rxp->cq.ccb->rx_coalescing_timeo =
3008 rxp->cq.ib->ib_config.coalescing_timeo;
3009 rxp->cq.ccb->id = i;
3010
3011 /* Call bnad to complete CCB setup */
3012 if (rx->ccb_setup_cbfn)
3013 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
3014
3015 } /* for each rx-path */
3016
3017 bna_rxf_init(&rx->rxf, rx, rx_cfg);
3018
3019 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
3020
3021 return rx;
3022}
3023
3024void
3025bna_rx_destroy(struct bna_rx *rx)
3026{
3027 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
3028 struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
3029 struct bna_rxq *q0 = NULL;
3030 struct bna_rxq *q1 = NULL;
3031 struct bna_rxp *rxp;
3032 struct list_head *qe;
3033
3034 bna_rxf_uninit(&rx->rxf);
3035
3036 while (!list_empty(&rx->rxp_q)) {
3037 bfa_q_deq(&rx->rxp_q, &rxp);
3038 GET_RXQS(rxp, q0, q1);
3039 /* Callback to bnad for destroying RCB */
3040 if (rx->rcb_destroy_cbfn)
3041 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
3042 q0->rcb = NULL;
3043 q0->rxp = NULL;
3044 q0->rx = NULL;
3045 _put_free_rxq(rx_mod, q0);
3046 if (q1) {
3047 /* Callback to bnad for destroying RCB */
3048 if (rx->rcb_destroy_cbfn)
3049 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
3050 q1->rcb = NULL;
3051 q1->rxp = NULL;
3052 q1->rx = NULL;
3053 _put_free_rxq(rx_mod, q1);
3054 }
3055 rxp->rxq.slr.large = NULL;
3056 rxp->rxq.slr.small = NULL;
3057 if (rxp->cq.ib) {
3058 if (rxp->cq.ib_seg_offset != 0xff)
3059 bna_ib_release_idx(rxp->cq.ib,
3060 rxp->cq.ib_seg_offset);
3061 bna_ib_put(ib_mod, rxp->cq.ib);
3062 rxp->cq.ib = NULL;
3063 }
3064 /* Callback to bnad for destroying CCB */
3065 if (rx->ccb_destroy_cbfn)
3066 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
3067 rxp->cq.ccb = NULL;
3068 rxp->rx = NULL;
3069 _put_free_rxp(rx_mod, rxp);
3070 }
3071
3072 list_for_each(qe, &rx_mod->rx_active_q) {
3073 if (qe == &rx->qe) {
3074 list_del(&rx->qe);
3075 bfa_q_qe_init(&rx->qe);
3076 break;
3077 }
3078 }
3079
3080 rx->bna = NULL;
3081 rx->priv = NULL;
3082 _put_free_rx(rx_mod, rx);
3083}
3084
3085void
3086bna_rx_enable(struct bna_rx *rx)
3087{
3088 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
3089 return;
3090
3091 rx->rx_flags |= BNA_RX_F_ENABLE;
3092 if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
3093 bfa_fsm_send_event(rx, RX_E_START);
3094}
3095
3096void
3097bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
3098 void (*cbfn)(void *, struct bna_rx *,
3099 enum bna_cb_status))
3100{
3101 if (type == BNA_SOFT_CLEANUP) {
3102		/* h/w should not be accessed. Treat it as if we are already stopped */
3103 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
3104 } else {
3105 rx->stop_cbfn = cbfn;
3106 rx->stop_cbarg = rx->bna->bnad;
3107
3108 rx->rx_flags &= ~BNA_RX_F_ENABLE;
3109
3110 bfa_fsm_send_event(rx, RX_E_STOP);
3111 }
3112}
3113
3114/**
3115 * TX
3116 */
3117#define call_tx_stop_cbfn(tx, status)\
3118do {\
3119 if ((tx)->stop_cbfn)\
3120 (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
3121 (tx)->stop_cbfn = NULL;\
3122 (tx)->stop_cbarg = NULL;\
3123} while (0)
3124
3125#define call_tx_prio_change_cbfn(tx, status)\
3126do {\
3127 if ((tx)->prio_change_cbfn)\
3128 (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
3129 (tx)->prio_change_cbfn = NULL;\
3130} while (0)
3131
3132static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
3133 enum bna_cb_status status);
3134static void bna_tx_cb_txq_stopped(void *arg, int status);
3135static void bna_tx_cb_stats_cleared(void *arg, int status);
3136static void __bna_tx_stop(struct bna_tx *tx);
3137static void __bna_tx_start(struct bna_tx *tx);
3138static void __bna_txf_stat_clr(struct bna_tx *tx);
3139
3140enum bna_tx_event {
3141 TX_E_START = 1,
3142 TX_E_STOP = 2,
3143 TX_E_FAIL = 3,
3144 TX_E_TXQ_STOPPED = 4,
3145 TX_E_PRIO_CHANGE = 5,
3146 TX_E_STAT_CLEARED = 6,
3147};
3148
3149enum bna_tx_state {
3150 BNA_TX_STOPPED = 1,
3151 BNA_TX_STARTED = 2,
3152 BNA_TX_TXQ_STOP_WAIT = 3,
3153 BNA_TX_PRIO_STOP_WAIT = 4,
3154 BNA_TX_STAT_CLR_WAIT = 5,
3155};
3156
3157bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
3158 enum bna_tx_event);
3159bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
3160 enum bna_tx_event);
3161bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
3162 enum bna_tx_event);
3163bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3164 enum bna_tx_event);
3165bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
3166 enum bna_tx_event);
3167
3168static struct bfa_sm_table tx_sm_table[] = {
3169 {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
3170 {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
3171 {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
3172 {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
3173 {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
3174};
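/*
 * Illustrative sketch, not part of the driver: tables such as tx_sm_table
 * let the driver map a state-handler function pointer back to a numeric
 * state for reporting; bfa_sm_to_state() is used for that further below.
 * A lookup in the same style, with hypothetical types, is a linear scan:
 */
struct sm_entry {
	void (*handler)(void);
	int state;
};

static int sm_to_state(const struct sm_entry *tbl, void (*handler)(void))
{
	int i = 0;

	while (tbl[i].handler != handler)
		i++;
	return tbl[i].state;
}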
3175
3176static void
3177bna_tx_sm_stopped_entry(struct bna_tx *tx)
3178{
3179 struct bna_txq *txq;
3180 struct list_head *qe;
3181
3182 list_for_each(qe, &tx->txq_q) {
3183 txq = (struct bna_txq *)qe;
3184 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3185 }
3186
3187 call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
3188}
3189
3190static void
3191bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3192{
3193 switch (event) {
3194 case TX_E_START:
3195 bfa_fsm_set_state(tx, bna_tx_sm_started);
3196 break;
3197
3198 case TX_E_STOP:
3199 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3200 break;
3201
3202 case TX_E_FAIL:
3203 /* No-op */
3204 break;
3205
3206 case TX_E_PRIO_CHANGE:
3207 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3208 break;
3209
3210 case TX_E_TXQ_STOPPED:
3211 /**
3212		 * This event is received due to flushing of the mailbox when
3213		 * the device fails
3214 */
3215 /* No-op */
3216 break;
3217
3218 default:
3219 bfa_sm_fault(event);
3220 }
3221}
3222
3223static void
3224bna_tx_sm_started_entry(struct bna_tx *tx)
3225{
3226 struct bna_txq *txq;
3227 struct list_head *qe;
3228
3229 __bna_tx_start(tx);
3230
3231 /* Start IB */
3232 list_for_each(qe, &tx->txq_q) {
3233 txq = (struct bna_txq *)qe;
3234 bna_ib_ack(&txq->ib->door_bell, 0);
3235 }
3236}
3237
3238static void
3239bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3240{
3241 struct bna_txq *txq;
3242 struct list_head *qe;
3243
3244 switch (event) {
3245 case TX_E_STOP:
3246 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3247 __bna_tx_stop(tx);
3248 break;
3249
3250 case TX_E_FAIL:
3251 list_for_each(qe, &tx->txq_q) {
3252 txq = (struct bna_txq *)qe;
3253 bna_ib_fail(txq->ib);
3254 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3255 }
3256 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3257 break;
3258
3259 case TX_E_PRIO_CHANGE:
3260 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3261 break;
3262
3263 default:
3264 bfa_sm_fault(event);
3265 }
3266}
3267
3268static void
3269bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
3270{
3271}
3272
3273static void
3274bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3275{
3276 struct bna_txq *txq;
3277 struct list_head *qe;
3278
3279 switch (event) {
3280 case TX_E_FAIL:
3281 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3282 break;
3283
3284 case TX_E_TXQ_STOPPED:
3285 list_for_each(qe, &tx->txq_q) {
3286 txq = (struct bna_txq *)qe;
3287 bna_ib_stop(txq->ib);
3288 }
3289 bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
3290 break;
3291
3292 case TX_E_PRIO_CHANGE:
3293 /* No-op */
3294 break;
3295
3296 default:
3297 bfa_sm_fault(event);
3298 }
3299}
3300
3301static void
3302bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3303{
3304 __bna_tx_stop(tx);
3305}
3306
3307static void
3308bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3309{
3310 struct bna_txq *txq;
3311 struct list_head *qe;
3312
3313 switch (event) {
3314 case TX_E_STOP:
3315 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3316 break;
3317
3318 case TX_E_FAIL:
3319 call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
3320 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3321 break;
3322
3323 case TX_E_TXQ_STOPPED:
3324 list_for_each(qe, &tx->txq_q) {
3325 txq = (struct bna_txq *)qe;
3326 bna_ib_stop(txq->ib);
3327 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3328 }
3329 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3330 bfa_fsm_set_state(tx, bna_tx_sm_started);
3331 break;
3332
3333 case TX_E_PRIO_CHANGE:
3334 /* No-op */
3335 break;
3336
3337 default:
3338 bfa_sm_fault(event);
3339 }
3340}
3341
3342static void
3343bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
3344{
3345 __bna_txf_stat_clr(tx);
3346}
3347
3348static void
3349bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
3350{
3351 switch (event) {
3352 case TX_E_FAIL:
3353 case TX_E_STAT_CLEARED:
3354 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3355 break;
3356
3357 default:
3358 bfa_sm_fault(event);
3359 }
3360}
3361
3362static void
3363__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
3364{
3365 struct bna_rxtx_q_mem *q_mem;
3366 struct bna_txq_mem txq_cfg;
3367 struct bna_txq_mem *txq_mem;
3368 struct bna_dma_addr cur_q_addr;
3369 u32 pg_num;
3370 void __iomem *base_addr;
3371 unsigned long off;
3372
3373 /* Fill out structure, to be subsequently written to hardware */
3374 txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
3375 txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
3376 cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
3377 txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
3378 txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
3379
3380 txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
3381
3382 txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
3383 (txq->qpt.page_size >> 2);
3384 txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
3385 ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
3386
3387 txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
3388 txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
3389 (txq->priority & 0x7));
3390 txq_cfg.wvc_n_cquota_n_rquota =
3391 ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
3392 (BFI_TX_MAX_WRR_QUOTA & 0xfff));
3393
3394 /* Setup the page and write to H/W */
3395
3396 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
3397 HQM_RXTX_Q_RAM_BASE_OFFSET);
3398 writel(pg_num, tx->bna->regs.page_addr);
3399
3400 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3401 HQM_RXTX_Q_RAM_BASE_OFFSET);
3402 q_mem = (struct bna_rxtx_q_mem *)0;
3403 txq_mem = &q_mem[txq->txq_id].txq;
3404
3405 /*
3406	 * The following four writes are a workaround because the H/W needs
3407	 * to read these DMA addresses in little-endian format
3408 */
3409
3410 off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
3411 writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
3412
3413 off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
3414 writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
3415
3416 off = (unsigned long)&txq_mem->cur_q_entry_lo;
3417 writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
3418
3419 off = (unsigned long)&txq_mem->cur_q_entry_hi;
3420 writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
3421
3422 off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
3423 writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
3424
3425 off = (unsigned long)&txq_mem->entry_n_pg_size;
3426 writel(txq_cfg.entry_n_pg_size, base_addr + off);
3427
3428 off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
3429 writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
3430
3431 off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
3432 writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
3433
3434 off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
3435 writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
3436
3437 off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
3438 writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
3439
3440 txq->tcb->producer_index = 0;
3441 txq->tcb->consumer_index = 0;
3442 *(txq->tcb->hw_consumer_index) = 0;
3443
3444}
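/*
 * Illustrative sketch, not part of the driver: casting 0 to a struct
 * pointer and taking &q_mem[txq_id].field, as done above, is a manual way
 * of turning a queue id and field into a register offset.  It is
 * equivalent to array-index plus offsetof() arithmetic (offsetof() comes
 * from <linux/stddef.h>):
 */
static unsigned long txq_pg_tbl_lo_off(u32 txq_id)
{
	return txq_id * sizeof(struct bna_rxtx_q_mem) +
	       offsetof(struct bna_rxtx_q_mem, txq) +
	       offsetof(struct bna_txq_mem, pg_tbl_addr_lo);
}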
3445
3446static void
3447__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
3448{
3449 struct bfi_ll_q_stop_req ll_req;
3450 u32 bit_mask[2] = {0, 0};
3451 if (txq->txq_id < 32)
3452 bit_mask[0] = (u32)1 << txq->txq_id;
3453 else
3454 bit_mask[1] = (u32)1 << (txq->txq_id - 32);
3455
3456 memset(&ll_req, 0, sizeof(ll_req));
3457 ll_req.mh.msg_class = BFI_MC_LL;
3458 ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
3459 ll_req.mh.mtag.h2i.lpu_id = 0;
3460 ll_req.q_id_mask[0] = htonl(bit_mask[0]);
3461 ll_req.q_id_mask[1] = htonl(bit_mask[1]);
3462
3463 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3464 bna_tx_cb_txq_stopped, tx);
3465
3466 bna_mbox_send(tx->bna, &tx->mbox_qe);
3467}
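/*
 * Illustrative sketch, not part of the driver: queue ids can exceed 31,
 * so the stop request carries a two-word mask; the id picks the word and
 * the bit within it, exactly as open-coded above.  Hypothetical helper:
 */
static void qid_mask_set(u32 mask[2], u32 q_id)
{
	mask[q_id / 32] |= (u32)1 << (q_id % 32);
}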
3468
3469static void
3470__bna_txf_start(struct bna_tx *tx)
3471{
3472 struct bna_tx_fndb_ram *tx_fndb;
3473 struct bna_txf *txf = &tx->txf;
3474 void __iomem *base_addr;
3475 unsigned long off;
3476
3477 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3478 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
3479 tx->bna->regs.page_addr);
3480
3481 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3482 TX_FNDB_RAM_BASE_OFFSET);
3483
3484 tx_fndb = (struct bna_tx_fndb_ram *)0;
3485 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3486
3487 writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
3488 base_addr + off);
3489
3490 if (tx->txf.txf_id < 32)
3491 tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
3492 else
3493 tx->bna->tx_mod.txf_bmap[1] |= ((u32)
3494 1 << (tx->txf.txf_id - 32));
3495}
3496
3497static void
3498__bna_txf_stop(struct bna_tx *tx)
3499{
3500 struct bna_tx_fndb_ram *tx_fndb;
3501 u32 page_num;
3502 u32 ctl_flags;
3503 struct bna_txf *txf = &tx->txf;
3504 void __iomem *base_addr;
3505 unsigned long off;
3506
3507 /* retrieve the running txf_flags & turn off enable bit */
3508 page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3509 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
3510 writel(page_num, tx->bna->regs.page_addr);
3511
3512 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3513 TX_FNDB_RAM_BASE_OFFSET);
3514 tx_fndb = (struct bna_tx_fndb_ram *)0;
3515 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3516
3517 ctl_flags = readl(base_addr + off);
3518 ctl_flags &= ~BFI_TXF_CF_ENABLE;
3519
3520 writel(ctl_flags, base_addr + off);
3521
3522 if (tx->txf.txf_id < 32)
3523 tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
3524 else
3525		tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
3526			1 << (tx->txf.txf_id - 32));
3527}
3528
3529static void
3530__bna_txf_stat_clr(struct bna_tx *tx)
3531{
3532 struct bfi_ll_stats_req ll_req;
3533 u32 txf_bmap[2] = {0, 0};
3534 if (tx->txf.txf_id < 32)
3535 txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
3536 else
3537 txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
3538 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
3539 ll_req.stats_mask = 0;
3540 ll_req.rxf_id_mask[0] = 0;
3541 ll_req.rxf_id_mask[1] = 0;
3542 ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
3543 ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
3544
3545 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3546 bna_tx_cb_stats_cleared, tx);
3547 bna_mbox_send(tx->bna, &tx->mbox_qe);
3548}
3549
3550static void
3551__bna_tx_start(struct bna_tx *tx)
3552{
3553 struct bna_txq *txq;
3554 struct list_head *qe;
3555
3556 list_for_each(qe, &tx->txq_q) {
3557 txq = (struct bna_txq *)qe;
3558 bna_ib_start(txq->ib);
3559 __bna_txq_start(tx, txq);
3560 }
3561
3562 __bna_txf_start(tx);
3563
3564 list_for_each(qe, &tx->txq_q) {
3565 txq = (struct bna_txq *)qe;
3566 txq->tcb->priority = txq->priority;
3567 (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
3568 }
3569}
3570
3571static void
3572__bna_tx_stop(struct bna_tx *tx)
3573{
3574 struct bna_txq *txq;
3575 struct list_head *qe;
3576
3577 list_for_each(qe, &tx->txq_q) {
3578 txq = (struct bna_txq *)qe;
3579 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3580 }
3581
3582 __bna_txf_stop(tx);
3583
3584 list_for_each(qe, &tx->txq_q) {
3585 txq = (struct bna_txq *)qe;
3586 bfa_wc_up(&tx->txq_stop_wc);
3587 }
3588
3589 list_for_each(qe, &tx->txq_q) {
3590 txq = (struct bna_txq *)qe;
3591 __bna_txq_stop(tx, txq);
3592 }
3593}
3594
3595static void
3596bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3597 struct bna_mem_descr *qpt_mem,
3598 struct bna_mem_descr *swqpt_mem,
3599 struct bna_mem_descr *page_mem)
3600{
3601 int i;
3602
3603 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3604 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3605 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3606 txq->qpt.page_count = page_count;
3607 txq->qpt.page_size = page_size;
3608
3609 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3610
3611 for (i = 0; i < page_count; i++) {
3612 txq->tcb->sw_qpt[i] = page_mem[i].kva;
3613
3614 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3615 page_mem[i].dma.lsb;
3616 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3617 page_mem[i].dma.msb;
3618
3619 }
3620}
3621
3622static void
3623bna_tx_free(struct bna_tx *tx)
3624{
3625 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3626 struct bna_txq *txq;
3627 struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
3628 struct list_head *qe;
3629
3630 while (!list_empty(&tx->txq_q)) {
3631 bfa_q_deq(&tx->txq_q, &txq);
3632 bfa_q_qe_init(&txq->qe);
3633 if (txq->ib) {
3634 if (txq->ib_seg_offset != -1)
3635 bna_ib_release_idx(txq->ib,
3636 txq->ib_seg_offset);
3637 bna_ib_put(ib_mod, txq->ib);
3638 txq->ib = NULL;
3639 }
3640 txq->tcb = NULL;
3641 txq->tx = NULL;
3642 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3643 }
3644
3645 list_for_each(qe, &tx_mod->tx_active_q) {
3646 if (qe == &tx->qe) {
3647 list_del(&tx->qe);
3648 bfa_q_qe_init(&tx->qe);
3649 break;
3650 }
3651 }
3652
3653 tx->bna = NULL;
3654 tx->priv = NULL;
3655 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3656}
3657
3658static void
3659bna_tx_cb_txq_stopped(void *arg, int status)
3660{
3661 struct bna_tx *tx = (struct bna_tx *)arg;
3662
3663 bfa_q_qe_init(&tx->mbox_qe.qe);
3664 bfa_wc_down(&tx->txq_stop_wc);
3665}
3666
3667static void
3668bna_tx_cb_txq_stopped_all(void *arg)
3669{
3670 struct bna_tx *tx = (struct bna_tx *)arg;
3671
3672 bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
3673}
3674
3675static void
3676bna_tx_cb_stats_cleared(void *arg, int status)
3677{
3678 struct bna_tx *tx = (struct bna_tx *)arg;
3679
3680 bfa_q_qe_init(&tx->mbox_qe.qe);
3681
3682 bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
3683}
3684
3685static void
3686bna_tx_start(struct bna_tx *tx)
3687{
3688 tx->flags |= BNA_TX_F_PORT_STARTED;
3689 if (tx->flags & BNA_TX_F_ENABLED)
3690 bfa_fsm_send_event(tx, TX_E_START);
3691}
3692
3693static void
3694bna_tx_stop(struct bna_tx *tx)
3695{
3696 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3697 tx->stop_cbarg = &tx->bna->tx_mod;
3698
3699 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3700 bfa_fsm_send_event(tx, TX_E_STOP);
3701}
3702
3703static void
3704bna_tx_fail(struct bna_tx *tx)
3705{
3706 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3707 bfa_fsm_send_event(tx, TX_E_FAIL);
3708}
3709
3710static void
3711bna_tx_prio_changed(struct bna_tx *tx, int prio)
3712{
3713 struct bna_txq *txq;
3714 struct list_head *qe;
3715
3716 list_for_each(qe, &tx->txq_q) {
3717 txq = (struct bna_txq *)qe;
3718 txq->priority = prio;
3719 }
3720
3721 bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
3722}
3723
3724static void
3725bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
3726{
3727 if (cee_link)
3728 tx->flags |= BNA_TX_F_PRIO_LOCK;
3729 else
3730 tx->flags &= ~BNA_TX_F_PRIO_LOCK;
3731}
3732
3733static void
3734bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
3735 enum bna_cb_status status)
3736{
3737 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3738
3739 bfa_wc_down(&tx_mod->tx_stop_wc);
3740}
3741
3742static void
3743bna_tx_mod_cb_tx_stopped_all(void *arg)
3744{
3745 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3746
3747 if (tx_mod->stop_cbfn)
3748 tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
3749 tx_mod->stop_cbfn = NULL;
3750}
3751
3752void
3753bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3754{
3755 u32 q_size;
3756 u32 page_count;
3757 struct bna_mem_info *mem_info;
3758
3759 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3760 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3761 mem_info->mem_type = BNA_MEM_T_KVA;
3762 mem_info->len = sizeof(struct bna_tcb);
3763 mem_info->num = num_txq;
3764
3765 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3766 q_size = ALIGN(q_size, PAGE_SIZE);
3767 page_count = q_size >> PAGE_SHIFT;
3768
3769 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3770 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3771 mem_info->mem_type = BNA_MEM_T_DMA;
3772 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3773 mem_info->num = num_txq;
3774
3775 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3776 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3777 mem_info->mem_type = BNA_MEM_T_KVA;
3778 mem_info->len = page_count * sizeof(void *);
3779 mem_info->num = num_txq;
3780
3781 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3782 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3783 mem_info->mem_type = BNA_MEM_T_DMA;
3784 mem_info->len = PAGE_SIZE;
3785 mem_info->num = num_txq * page_count;
3786
3787 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3788 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3789 BNA_INTR_T_MSIX;
3790 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3791}
3792
3793struct bna_tx *
3794bna_tx_create(struct bna *bna, struct bnad *bnad,
3795 struct bna_tx_config *tx_cfg,
3796 struct bna_tx_event_cbfn *tx_cbfn,
3797 struct bna_res_info *res_info, void *priv)
3798{
3799 struct bna_intr_info *intr_info;
3800 struct bna_tx_mod *tx_mod = &bna->tx_mod;
3801 struct bna_tx *tx;
3802 struct bna_txq *txq;
3803 struct list_head *qe;
3804 struct bna_ib_mod *ib_mod = &bna->ib_mod;
3805 struct bna_doorbell_qset *qset;
3806 struct bna_ib_config ib_config;
3807 int page_count;
3808 int page_size;
3809 int page_idx;
3810 int i;
3811 unsigned long off;
3812
3813 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3814 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
3815 tx_cfg->num_txq;
3816 page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
3817
3818 /**
3819 * Get resources
3820 */
3821
3822 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3823 return NULL;
3824
3825 /* Tx */
3826
3827 if (list_empty(&tx_mod->tx_free_q))
3828 return NULL;
3829 bfa_q_deq(&tx_mod->tx_free_q, &tx);
3830 bfa_q_qe_init(&tx->qe);
3831
3832 /* TxQs */
3833
3834 INIT_LIST_HEAD(&tx->txq_q);
3835 for (i = 0; i < tx_cfg->num_txq; i++) {
3836 if (list_empty(&tx_mod->txq_free_q))
3837 goto err_return;
3838
3839 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3840 bfa_q_qe_init(&txq->qe);
3841 list_add_tail(&txq->qe, &tx->txq_q);
3842 txq->ib = NULL;
3843 txq->ib_seg_offset = -1;
3844 txq->tx = tx;
3845 }
3846
3847 /* IBs */
3848 i = 0;
3849 list_for_each(qe, &tx->txq_q) {
3850 txq = (struct bna_txq *)qe;
3851
3852 if (intr_info->num == 1)
3853 txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3854 intr_info->idl[0].vector);
3855 else
3856 txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3857 intr_info->idl[i].vector);
3858
3859 if (txq->ib == NULL)
3860 goto err_return;
3861
3862 txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
3863 if (txq->ib_seg_offset == -1)
3864 goto err_return;
3865
3866 i++;
3867 }
3868
3869 /*
3870 * Initialize
3871 */
3872
3873 /* Tx */
3874
3875 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3876 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3877 /* Following callbacks are mandatory */
3878 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3879 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3880 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3881
3882 list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3883 tx->bna = bna;
3884 tx->priv = priv;
3885 tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
3886 tx->txq_stop_wc.wc_cbarg = tx;
3887 tx->txq_stop_wc.wc_count = 0;
3888
3889 tx->type = tx_cfg->tx_type;
3890
3891 tx->flags = 0;
3892 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
3893 switch (tx->type) {
3894 case BNA_TX_T_REGULAR:
3895 if (!(tx->bna->tx_mod.flags &
3896 BNA_TX_MOD_F_PORT_LOOPBACK))
3897 tx->flags |= BNA_TX_F_PORT_STARTED;
3898 break;
3899 case BNA_TX_T_LOOPBACK:
3900 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
3901 tx->flags |= BNA_TX_F_PORT_STARTED;
3902 break;
3903 }
3904 }
3905 if (tx->bna->tx_mod.cee_link)
3906 tx->flags |= BNA_TX_F_PRIO_LOCK;
3907
3908 /* TxQ */
3909
3910 i = 0;
3911 page_idx = 0;
3912 list_for_each(qe, &tx->txq_q) {
3913 txq = (struct bna_txq *)qe;
3914 txq->priority = tx_mod->priority;
3915 txq->tcb = (struct bna_tcb *)
3916 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3917 txq->tx_packets = 0;
3918 txq->tx_bytes = 0;
3919
3920 /* IB */
3921
3922 ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3923 ib_config.interpkt_timeo = 0; /* Not used */
3924 ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
3925 ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
3926 BFI_IB_CF_INT_ENABLE |
3927 BFI_IB_CF_COALESCING_MODE);
3928 bna_ib_config(txq->ib, &ib_config);
3929
3930 /* TCB */
3931
3932 txq->tcb->producer_index = 0;
3933 txq->tcb->consumer_index = 0;
3934 txq->tcb->hw_consumer_index = (volatile u32 *)
3935 ((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
3936 (txq->ib_seg_offset * BFI_IBIDX_SIZE));
3937 *(txq->tcb->hw_consumer_index) = 0;
3938 txq->tcb->q_depth = tx_cfg->txq_depth;
3939 txq->tcb->unmap_q = (void *)
3940 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3941 qset = (struct bna_doorbell_qset *)0;
3942 off = (unsigned long)&qset[txq->txq_id].txq[0];
3943 txq->tcb->q_dbell = off +
3944 BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
3945 txq->tcb->i_dbell = &txq->ib->door_bell;
3946 txq->tcb->intr_type = intr_info->intr_type;
3947 txq->tcb->intr_vector = (intr_info->num == 1) ?
3948 intr_info->idl[0].vector :
3949 intr_info->idl[i].vector;
3950 txq->tcb->txq = txq;
3951 txq->tcb->bnad = bnad;
3952 txq->tcb->id = i;
3953
3954 /* QPT, SWQPT, Pages */
3955 bna_txq_qpt_setup(txq, page_count, page_size,
3956 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3957 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3958 &res_info[BNA_TX_RES_MEM_T_PAGE].
3959 res_u.mem_info.mdl[page_idx]);
3960 txq->tcb->page_idx = page_idx;
3961 txq->tcb->page_count = page_count;
3962 page_idx += page_count;
3963
3964 /* Callback to bnad for setting up TCB */
3965 if (tx->tcb_setup_cbfn)
3966 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3967
3968 i++;
3969 }
3970
3971 /* TxF */
3972
3973 tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
3974 tx->txf.vlan = 0;
3975
3976 /* Mbox element */
3977 bfa_q_qe_init(&tx->mbox_qe.qe);
3978
3979 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3980
3981 return tx;
3982
3983err_return:
3984 bna_tx_free(tx);
3985 return NULL;
3986}
3987
3988void
3989bna_tx_destroy(struct bna_tx *tx)
3990{
3991 /* Callback to bnad for destroying TCB */
3992 if (tx->tcb_destroy_cbfn) {
3993 struct bna_txq *txq;
3994 struct list_head *qe;
3995
3996 list_for_each(qe, &tx->txq_q) {
3997 txq = (struct bna_txq *)qe;
3998 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3999 }
4000 }
4001
4002 bna_tx_free(tx);
4003}
4004
4005void
4006bna_tx_enable(struct bna_tx *tx)
4007{
4008 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
4009 return;
4010
4011 tx->flags |= BNA_TX_F_ENABLED;
4012
4013 if (tx->flags & BNA_TX_F_PORT_STARTED)
4014 bfa_fsm_send_event(tx, TX_E_START);
4015}
4016
4017void
4018bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
4019 void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
4020{
4021 if (type == BNA_SOFT_CLEANUP) {
4022 (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
4023 return;
4024 }
4025
4026 tx->stop_cbfn = cbfn;
4027 tx->stop_cbarg = tx->bna->bnad;
4028
4029 tx->flags &= ~BNA_TX_F_ENABLED;
4030
4031 bfa_fsm_send_event(tx, TX_E_STOP);
4032}
4033
4034int
4035bna_tx_state_get(struct bna_tx *tx)
4036{
4037 return bfa_sm_to_state(tx_sm_table, tx->fsm);
4038}
4039
4040void
4041bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
4042 struct bna_res_info *res_info)
4043{
4044 int i;
4045
4046 tx_mod->bna = bna;
4047 tx_mod->flags = 0;
4048
4049 tx_mod->tx = (struct bna_tx *)
4050 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
4051 tx_mod->txq = (struct bna_txq *)
4052 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
4053
4054 INIT_LIST_HEAD(&tx_mod->tx_free_q);
4055 INIT_LIST_HEAD(&tx_mod->tx_active_q);
4056
4057 INIT_LIST_HEAD(&tx_mod->txq_free_q);
4058
4059 for (i = 0; i < BFI_MAX_TXQ; i++) {
4060 tx_mod->tx[i].txf.txf_id = i;
4061 bfa_q_qe_init(&tx_mod->tx[i].qe);
4062 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
4063
4064 tx_mod->txq[i].txq_id = i;
4065 bfa_q_qe_init(&tx_mod->txq[i].qe);
4066 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
4067 }
4068
4069 tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
4070 tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
4071 tx_mod->tx_stop_wc.wc_count = 0;
4072}
4073
4074void
4075bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
4076{
4077 struct list_head *qe;
4078 int i;
4079
4080 i = 0;
4081 list_for_each(qe, &tx_mod->tx_free_q)
4082 i++;
4083
4084 i = 0;
4085 list_for_each(qe, &tx_mod->txq_free_q)
4086 i++;
4087
4088 tx_mod->bna = NULL;
4089}
4090
4091void
4092bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
4093{
4094 struct bna_tx *tx;
4095 struct list_head *qe;
4096
4097 tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
4098 if (type == BNA_TX_T_LOOPBACK)
4099 tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
4100
4101 list_for_each(qe, &tx_mod->tx_active_q) {
4102 tx = (struct bna_tx *)qe;
4103 if (tx->type == type)
4104 bna_tx_start(tx);
4105 }
4106}
4107
4108void
4109bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
4110{
4111 struct bna_tx *tx;
4112 struct list_head *qe;
4113
4114 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
4115 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
4116
4117 tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
4118
4119 /**
4120 * Before calling bna_tx_stop(), increment tx_stop_wc as many times
4121 * as we are going to call bna_tx_stop
4122 */
4123 list_for_each(qe, &tx_mod->tx_active_q) {
4124 tx = (struct bna_tx *)qe;
4125 if (tx->type == type)
4126 bfa_wc_up(&tx_mod->tx_stop_wc);
4127 }
4128
4129 if (tx_mod->tx_stop_wc.wc_count == 0) {
4130 tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
4131 tx_mod->stop_cbfn = NULL;
4132 return;
4133 }
4134
4135 list_for_each(qe, &tx_mod->tx_active_q) {
4136 tx = (struct bna_tx *)qe;
4137 if (tx->type == type)
4138 bna_tx_stop(tx);
4139 }
4140}
4141
4142void
4143bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
4144{
4145 struct bna_tx *tx;
4146 struct list_head *qe;
4147
4148 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
4149 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
4150
4151 list_for_each(qe, &tx_mod->tx_active_q) {
4152 tx = (struct bna_tx *)qe;
4153 bna_tx_fail(tx);
4154 }
4155}
4156
4157void
4158bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
4159{
4160 struct bna_tx *tx;
4161 struct list_head *qe;
4162
4163 if (prio != tx_mod->priority) {
4164 tx_mod->priority = prio;
4165
4166 list_for_each(qe, &tx_mod->tx_active_q) {
4167 tx = (struct bna_tx *)qe;
4168 bna_tx_prio_changed(tx, prio);
4169 }
4170 }
4171}
4172
4173void
4174bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
4175{
4176 struct bna_tx *tx;
4177 struct list_head *qe;
4178
4179 tx_mod->cee_link = cee_link;
4180
4181 list_for_each(qe, &tx_mod->tx_active_q) {
4182 tx = (struct bna_tx *)qe;
4183 bna_tx_cee_link_status(tx, cee_link);
4184 }
4185}
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
new file mode 100644
index 000000000000..2f89cb235248
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -0,0 +1,1127 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BNA_TYPES_H__
19#define __BNA_TYPES_H__
20
21#include "cna.h"
22#include "bna_hw.h"
23#include "bfa_cee.h"
24
25/**
26 *
27 * Forward declarations
28 *
29 */
30
31struct bna_txq;
32struct bna_tx;
33struct bna_rxq;
34struct bna_cq;
35struct bna_rx;
36struct bna_rxf;
37struct bna_port;
38struct bna;
39struct bnad;
40
41/**
42 *
43 * Enums, primitive data types
44 *
45 */
46
47enum bna_status {
48 BNA_STATUS_T_DISABLED = 0,
49 BNA_STATUS_T_ENABLED = 1
50};
51
52enum bna_cleanup_type {
53 BNA_HARD_CLEANUP = 0,
54 BNA_SOFT_CLEANUP = 1
55};
56
57enum bna_cb_status {
58 BNA_CB_SUCCESS = 0,
59 BNA_CB_FAIL = 1,
60 BNA_CB_INTERRUPT = 2,
61 BNA_CB_BUSY = 3,
62 BNA_CB_INVALID_MAC = 4,
63 BNA_CB_MCAST_LIST_FULL = 5,
64 BNA_CB_UCAST_CAM_FULL = 6,
65 BNA_CB_WAITING = 7,
66 BNA_CB_NOT_EXEC = 8
67};
68
69enum bna_res_type {
70 BNA_RES_T_MEM = 1,
71 BNA_RES_T_INTR = 2
72};
73
74enum bna_mem_type {
75 BNA_MEM_T_KVA = 1,
76 BNA_MEM_T_DMA = 2
77};
78
79enum bna_intr_type {
80 BNA_INTR_T_INTX = 1,
81 BNA_INTR_T_MSIX = 2
82};
83
84enum bna_res_req_type {
85 BNA_RES_MEM_T_COM = 0,
86 BNA_RES_MEM_T_ATTR = 1,
87 BNA_RES_MEM_T_FWTRC = 2,
88 BNA_RES_MEM_T_STATS = 3,
89 BNA_RES_MEM_T_SWSTATS = 4,
90 BNA_RES_MEM_T_IBIDX = 5,
91 BNA_RES_MEM_T_IB_ARRAY = 6,
92 BNA_RES_MEM_T_INTR_ARRAY = 7,
93 BNA_RES_MEM_T_IDXSEG_ARRAY = 8,
94 BNA_RES_MEM_T_TX_ARRAY = 9,
95 BNA_RES_MEM_T_TXQ_ARRAY = 10,
96 BNA_RES_MEM_T_RX_ARRAY = 11,
97 BNA_RES_MEM_T_RXP_ARRAY = 12,
98 BNA_RES_MEM_T_RXQ_ARRAY = 13,
99 BNA_RES_MEM_T_UCMAC_ARRAY = 14,
100 BNA_RES_MEM_T_MCMAC_ARRAY = 15,
101 BNA_RES_MEM_T_RIT_ENTRY = 16,
102 BNA_RES_MEM_T_RIT_SEGMENT = 17,
103 BNA_RES_INTR_T_MBOX = 18,
104 BNA_RES_T_MAX
105};
106
107enum bna_tx_res_req_type {
108 BNA_TX_RES_MEM_T_TCB = 0,
109 BNA_TX_RES_MEM_T_UNMAPQ = 1,
110 BNA_TX_RES_MEM_T_QPT = 2,
111 BNA_TX_RES_MEM_T_SWQPT = 3,
112 BNA_TX_RES_MEM_T_PAGE = 4,
113 BNA_TX_RES_INTR_T_TXCMPL = 5,
114 BNA_TX_RES_T_MAX,
115};
116
117enum bna_rx_mem_type {
118 BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
119	BNA_RX_RES_MEM_T_RCB = 1,	/* RxQ context */
120 BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
121 BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
122 BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
123 BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
124 BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
125 BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
126 BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
127 BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
128	BNA_RX_RES_MEM_T_DPAGE = 10,	/* Data QPT pages */
129	BNA_RX_RES_MEM_T_HPAGE = 11,	/* Hdr QPT pages */
130 BNA_RX_RES_T_INTR = 12, /* Rx interrupts */
131 BNA_RX_RES_T_MAX = 13
132};
133
134enum bna_mbox_state {
135 BNA_MBOX_FREE = 0,
136 BNA_MBOX_POSTED = 1
137};
138
139enum bna_tx_type {
140 BNA_TX_T_REGULAR = 0,
141 BNA_TX_T_LOOPBACK = 1,
142};
143
144enum bna_tx_flags {
145 BNA_TX_F_PORT_STARTED = 1,
146 BNA_TX_F_ENABLED = 2,
147 BNA_TX_F_PRIO_LOCK = 4,
148};
149
150enum bna_tx_mod_flags {
151 BNA_TX_MOD_F_PORT_STARTED = 1,
152 BNA_TX_MOD_F_PORT_LOOPBACK = 2,
153};
154
155enum bna_rx_type {
156 BNA_RX_T_REGULAR = 0,
157 BNA_RX_T_LOOPBACK = 1,
158};
159
160enum bna_rxp_type {
161 BNA_RXP_SINGLE = 1,
162 BNA_RXP_SLR = 2,
163 BNA_RXP_HDS = 3
164};
165
166enum bna_rxmode {
167 BNA_RXMODE_PROMISC = 1,
168 BNA_RXMODE_ALLMULTI = 2
169};
170
171enum bna_rx_event {
172 RX_E_START = 1,
173 RX_E_STOP = 2,
174 RX_E_FAIL = 3,
175 RX_E_RXF_STARTED = 4,
176 RX_E_RXF_STOPPED = 5,
177 RX_E_RXQ_STOPPED = 6,
178};
179
180enum bna_rx_state {
181 BNA_RX_STOPPED = 1,
182 BNA_RX_RXF_START_WAIT = 2,
183 BNA_RX_STARTED = 3,
184 BNA_RX_RXF_STOP_WAIT = 4,
185 BNA_RX_RXQ_STOP_WAIT = 5,
186};
187
188enum bna_rx_flags {
189 BNA_RX_F_ENABLE = 0x01, /* bnad enabled rxf */
190 BNA_RX_F_PORT_ENABLED = 0x02, /* Port object is enabled */
191 BNA_RX_F_PORT_FAILED = 0x04, /* Port in failed state */
192};
193
194enum bna_rx_mod_flags {
195 BNA_RX_MOD_F_PORT_STARTED = 1,
196 BNA_RX_MOD_F_PORT_LOOPBACK = 2,
197};
198
199enum bna_rxf_oper_state {
200 BNA_RXF_OPER_STATE_RUNNING = 0x01, /* rxf operational */
201 BNA_RXF_OPER_STATE_PAUSED = 0x02, /* rxf in PAUSED state */
202};
203
204enum bna_rxf_flags {
205 BNA_RXF_FL_STOP_PENDING = 0x01,
206 BNA_RXF_FL_FAILED = 0x02,
207 BNA_RXF_FL_RSS_CONFIG_PENDING = 0x04,
208 BNA_RXF_FL_OPERSTATE_CHANGED = 0x08,
209 BNA_RXF_FL_RXF_ENABLED = 0x10,
210 BNA_RXF_FL_VLAN_CONFIG_PENDING = 0x20,
211};
212
213enum bna_rxf_event {
214 RXF_E_START = 1,
215 RXF_E_STOP = 2,
216 RXF_E_FAIL = 3,
217 RXF_E_CAM_FLTR_MOD = 4,
218 RXF_E_STARTED = 5,
219 RXF_E_STOPPED = 6,
220 RXF_E_CAM_FLTR_RESP = 7,
221 RXF_E_PAUSE = 8,
222 RXF_E_RESUME = 9,
223 RXF_E_STAT_CLEARED = 10,
224};
225
226enum bna_rxf_state {
227 BNA_RXF_STOPPED = 1,
228 BNA_RXF_START_WAIT = 2,
229 BNA_RXF_CAM_FLTR_MOD_WAIT = 3,
230 BNA_RXF_STARTED = 4,
231 BNA_RXF_CAM_FLTR_CLR_WAIT = 5,
232 BNA_RXF_STOP_WAIT = 6,
233 BNA_RXF_PAUSE_WAIT = 7,
234 BNA_RXF_RESUME_WAIT = 8,
235 BNA_RXF_STAT_CLR_WAIT = 9,
236};
237
238enum bna_port_type {
239 BNA_PORT_T_REGULAR = 0,
240 BNA_PORT_T_LOOPBACK_INTERNAL = 1,
241 BNA_PORT_T_LOOPBACK_EXTERNAL = 2,
242};
243
244enum bna_link_status {
245 BNA_LINK_DOWN = 0,
246 BNA_LINK_UP = 1,
247 BNA_CEE_UP = 2
248};
249
250enum bna_llport_flags {
251 BNA_LLPORT_F_ADMIN_UP = 1,
252 BNA_LLPORT_F_PORT_ENABLED = 2,
253 BNA_LLPORT_F_RX_STARTED = 4
254};
255
256enum bna_port_flags {
257 BNA_PORT_F_DEVICE_READY = 1,
258 BNA_PORT_F_ENABLED = 2,
259 BNA_PORT_F_PAUSE_CHANGED = 4,
260 BNA_PORT_F_MTU_CHANGED = 8
261};
262
263enum bna_pkt_rates {
264 BNA_PKT_RATE_10K = 10000,
265 BNA_PKT_RATE_20K = 20000,
266 BNA_PKT_RATE_30K = 30000,
267 BNA_PKT_RATE_40K = 40000,
268 BNA_PKT_RATE_50K = 50000,
269 BNA_PKT_RATE_60K = 60000,
270 BNA_PKT_RATE_70K = 70000,
271 BNA_PKT_RATE_80K = 80000,
272};
273
274enum bna_dim_load_types {
275 BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */
276 BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */
277 BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */
278 BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */
279 BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */
280 BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */
281 BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */
282 BNA_LOAD_T_LOW_4 = 7, /* r < 10K */
283 BNA_LOAD_T_MAX = 8
284};
285
286enum bna_dim_bias_types {
287 BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */
288 BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */
289 BNA_BIAS_T_MAX = 2
290};
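
/*
 * Note: these two enums index the dynamic interrupt moderation lookup
 * table in struct bna_rx_mod (dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX]).
 * A rough sketch of how the table is meant to be indexed, based on the
 * range comments above and on struct bna_pkt_rate (the actual update
 * lives elsewhere in the driver):
 *
 *	load = bucket of the measured packet rate r per the ranges above;
 *	bias = (pkt_rate->small_pkt_cnt > 2 * pkt_rate->large_pkt_cnt) ?
 *			BNA_BIAS_T_SMALL : BNA_BIAS_T_LARGE;
 *	coalescing_timeo = rx_mod->dim_vector[load][bias];
 */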
291
292struct bna_mac {
293 /* This should be the first one */
294 struct list_head qe;
295 u8 addr[ETH_ALEN];
296};
297
298struct bna_mem_descr {
299 u32 len;
300 void *kva;
301 struct bna_dma_addr dma;
302};
303
304struct bna_mem_info {
305 enum bna_mem_type mem_type;
306 u32 len;
307 u32 num;
308 u32 align_sz; /* 0/1 = no alignment */
309 struct bna_mem_descr *mdl;
310 void *cookie; /* For bnad to unmap dma later */
311};
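
/*
 * A bna_mem_info asks for `num` blocks of `len` bytes each; see
 * bnad_mem_alloc()/bnad_mem_free() in bnad.c, which satisfy it with
 * kzalloc() for BNA_MEM_T_KVA or dma_alloc_coherent() for BNA_MEM_T_DMA
 * and fill in one bna_mem_descr per block in mdl[].
 */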
312
313struct bna_intr_descr {
314 int vector;
315};
316
317struct bna_intr_info {
318 enum bna_intr_type intr_type;
319 int num;
320 struct bna_intr_descr *idl;
321};
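
/*
 * Filled in by bnad_mbox_irq_alloc() and bnad_txrx_irq_alloc() in bnad.c:
 * in MSI-X mode each idl[] entry holds an index into the driver's
 * msix_table; in INTx mode bnad_txrx_irq_alloc() stores an IB bitmask in
 * the single entry instead.
 */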
322
323union bna_res_u {
324 struct bna_mem_info mem_info;
325 struct bna_intr_info intr_info;
326};
327
328struct bna_res_info {
329 enum bna_res_type res_type;
330 union bna_res_u res_u;
331};
332
333/* HW QPT */
334struct bna_qpt {
335 struct bna_dma_addr hw_qpt_ptr;
336 void *kv_qpt_ptr;
337 u32 page_count;
338 u32 page_size;
339};
340
341/**
342 *
343 * Device
344 *
345 */
346
347struct bna_device {
348 bfa_fsm_t fsm;
349 struct bfa_ioc ioc;
350
351 enum bna_intr_type intr_type;
352 int vector;
353
354 void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
355 struct bnad *ready_cbarg;
356
357 void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
358 struct bnad *stop_cbarg;
359
360 struct bna *bna;
361};
362
363/**
364 *
365 * Mail box
366 *
367 */
368
369struct bna_mbox_qe {
370 /* This should be the first one */
371 struct list_head qe;
372
373 struct bfa_mbox_cmd cmd;
374 u32 cmd_len;
375 /* Callback for port, tx, rx, rxf */
376 void (*cbfn)(void *arg, int status);
377 void *cbarg;
378};
379
380struct bna_mbox_mod {
381 enum bna_mbox_state state;
382 struct list_head posted_q;
383 u32 msg_pending;
384 u32 msg_ctr;
385 struct bna *bna;
386};
387
388/**
389 *
390 * Port
391 *
392 */
393
394/* Pause configuration */
395struct bna_pause_config {
396 enum bna_status tx_pause;
397 enum bna_status rx_pause;
398};
399
400struct bna_llport {
401 bfa_fsm_t fsm;
402 enum bna_llport_flags flags;
403
404 enum bna_port_type type;
405
406 enum bna_link_status link_status;
407
408 int rx_started_count;
409
410 void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
411
412 struct bna_mbox_qe mbox_qe;
413
414 struct bna *bna;
415};
416
417struct bna_port {
418 bfa_fsm_t fsm;
419 enum bna_port_flags flags;
420
421 enum bna_port_type type;
422
423 struct bna_llport llport;
424
425 struct bna_pause_config pause_config;
426 u8 priority;
427 int mtu;
428
429 /* Callback for bna_port_disable(), port_stop() */
430 void (*stop_cbfn)(void *, enum bna_cb_status);
431 void *stop_cbarg;
432
433 /* Callback for bna_port_pause_config() */
434 void (*pause_cbfn)(struct bnad *, enum bna_cb_status);
435
436 /* Callback for bna_port_mtu_set() */
437 void (*mtu_cbfn)(struct bnad *, enum bna_cb_status);
438
439 void (*link_cbfn)(struct bnad *, enum bna_link_status);
440
441 struct bfa_wc chld_stop_wc;
442
443 struct bna_mbox_qe mbox_qe;
444
445 struct bna *bna;
446};
447
448/**
449 *
450 * Interrupt Block
451 *
452 */
453
454/* IB index segment structure */
455struct bna_ibidx_seg {
456 /* This should be the first one */
457 struct list_head qe;
458
459 u8 ib_seg_size;
460 u8 ib_idx_tbl_offset;
461};
462
463/* Interrupt structure */
464struct bna_intr {
465 /* This should be the first one */
466 struct list_head qe;
467 int ref_count;
468
469 enum bna_intr_type intr_type;
470 int vector;
471
472 struct bna_ib *ib;
473};
474
475/* Doorbell structure */
476struct bna_ib_dbell {
477	void __iomem *doorbell_addr;
478 u32 doorbell_ack;
479};
480
481/* Interrupt timer configuration */
482struct bna_ib_config {
483 u8 coalescing_timeo; /* Unit is 5usec. */
484
485 int interpkt_count;
486 int interpkt_timeo;
487
488 enum ib_flags ctrl_flags;
489};
490
491/* IB structure */
492struct bna_ib {
493 /* This should be the first one */
494 struct list_head qe;
495
496 int ib_id;
497
498 int ref_count;
499 int start_count;
500
501 struct bna_dma_addr ib_seg_host_addr;
502 void *ib_seg_host_addr_kva;
503 u32 idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
504
505 struct bna_ibidx_seg *idx_seg;
506
507 struct bna_ib_dbell door_bell;
508
509 struct bna_intr *intr;
510
511 struct bna_ib_config ib_config;
512
513 struct bna *bna;
514};
515
516/* IB module - keeps track of IBs and interrupts */
517struct bna_ib_mod {
518 struct bna_ib *ib; /* BFI_MAX_IB entries */
519 struct bna_intr *intr; /* BFI_MAX_IB entries */
520 struct bna_ibidx_seg *idx_seg; /* BNA_IBIDX_TOTAL_SEGS */
521
522 struct list_head ib_free_q;
523
524 struct list_head ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
525
526 struct list_head intr_free_q;
527 struct list_head intr_active_q;
528
529 struct bna *bna;
530};
531
532/**
533 *
534 * Tx object
535 *
536 */
537
538/* Tx datapath control structure */
539#define BNA_Q_NAME_SIZE 16
540struct bna_tcb {
541 /* Fast path */
542 void **sw_qpt;
543 void *unmap_q;
544 u32 producer_index;
545 u32 consumer_index;
546 volatile u32 *hw_consumer_index;
547 u32 q_depth;
548	void __iomem *q_dbell;
549 struct bna_ib_dbell *i_dbell;
550 int page_idx;
551 int page_count;
552 /* Control path */
553 struct bna_txq *txq;
554 struct bnad *bnad;
555 enum bna_intr_type intr_type;
556 int intr_vector;
557 u8 priority; /* Current priority */
558 unsigned long flags; /* Used by bnad as required */
559 int id;
560 char name[BNA_Q_NAME_SIZE];
561};
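
/*
 * The "fast path" members above are the only ones touched per packet by
 * the data path in bnad.c (e.g. bnad_free_txbufs() walks unmap_q against
 * hw_consumer_index); the control path members are set up once, when
 * bnad_cb_tcb_setup() binds the tcb to the driver.
 */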
562
563/* TxQ QPT and configuration */
564struct bna_txq {
565 /* This should be the first one */
566 struct list_head qe;
567
568 int txq_id;
569
570 u8 priority;
571
572 struct bna_qpt qpt;
573 struct bna_tcb *tcb;
574 struct bna_ib *ib;
575 int ib_seg_offset;
576
577 struct bna_tx *tx;
578
579 u64 tx_packets;
580 u64 tx_bytes;
581};
582
583/* TxF structure (hardware Tx Function) */
584struct bna_txf {
585 int txf_id;
586 enum txf_flags ctrl_flags;
587 u16 vlan;
588};
589
590/* Tx object */
591struct bna_tx {
592 /* This should be the first one */
593 struct list_head qe;
594
595 bfa_fsm_t fsm;
596 enum bna_tx_flags flags;
597
598 enum bna_tx_type type;
599
600 struct list_head txq_q;
601 struct bna_txf txf;
602
603 /* Tx event handlers */
604 void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
605 void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
606 void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
607 void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
608 void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
609
610 /* callback for bna_tx_disable(), bna_tx_stop() */
611 void (*stop_cbfn)(void *arg, struct bna_tx *tx,
612 enum bna_cb_status status);
613 void *stop_cbarg;
614
615 /* callback for bna_tx_prio_set() */
616 void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
617 enum bna_cb_status status);
618
619 struct bfa_wc txq_stop_wc;
620
621 struct bna_mbox_qe mbox_qe;
622
623 struct bna *bna;
624 void *priv; /* bnad's cookie */
625};
626
627struct bna_tx_config {
628 int num_txq;
629 int txq_depth;
630 enum bna_tx_type tx_type;
631};
632
633struct bna_tx_event_cbfn {
634 /* Optional */
635 void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
636 void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
637 /* Mandatory */
638 void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
639 void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
640 void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
641};
642
643/* Tx module - keeps track of free, active tx objects */
644struct bna_tx_mod {
645 struct bna_tx *tx; /* BFI_MAX_TXQ entries */
646 struct bna_txq *txq; /* BFI_MAX_TXQ entries */
647
648 struct list_head tx_free_q;
649 struct list_head tx_active_q;
650
651 struct list_head txq_free_q;
652
653 /* callback for bna_tx_mod_stop() */
654 void (*stop_cbfn)(struct bna_port *port,
655 enum bna_cb_status status);
656
657 struct bfa_wc tx_stop_wc;
658
659 enum bna_tx_mod_flags flags;
660
661 int priority;
662 int cee_link;
663
664 u32 txf_bmap[2];
665
666 struct bna *bna;
667};
668
669/**
670 *
671 * Receive Indirection Table
672 *
673 */
674
675/* One row of RIT table */
676struct bna_rit_entry {
677 u8 large_rxq_id; /* used for either large or data buffers */
678 u8 small_rxq_id; /* used for either small or header buffers */
679};
680
681/* RIT segment */
682struct bna_rit_segment {
683 struct list_head qe;
684
685 u32 rit_offset;
686 u32 rit_size;
687 /**
688 * max_rit_size: Varies per RIT segment depending on how RIT is
689 * partitioned
690 */
691 u32 max_rit_size;
692
693 struct bna_rit_entry *rit;
694};
695
696struct bna_rit_mod {
697 struct bna_rit_entry *rit;
698 struct bna_rit_segment *rit_segment;
699
700 struct list_head rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
701};
702
703/**
704 *
705 * Rx object
706 *
707 */
708
709/* Rx datapath control structure */
710struct bna_rcb {
711 /* Fast path */
712 void **sw_qpt;
713 void *unmap_q;
714 u32 producer_index;
715 u32 consumer_index;
716 u32 q_depth;
717	void __iomem *q_dbell;
718 int page_idx;
719 int page_count;
720 /* Control path */
721 struct bna_rxq *rxq;
722 struct bna_cq *cq;
723 struct bnad *bnad;
724 unsigned long flags;
725 int id;
726};
727
728/* RxQ structure - QPT, configuration */
729struct bna_rxq {
730 struct list_head qe;
731 int rxq_id;
732
733 int buffer_size;
734 int q_depth;
735
736 struct bna_qpt qpt;
737 struct bna_rcb *rcb;
738
739 struct bna_rxp *rxp;
740 struct bna_rx *rx;
741
742 u64 rx_packets;
743 u64 rx_bytes;
744 u64 rx_packets_with_error;
745 u64 rxbuf_alloc_failed;
746};
747
748/* RxQ pair */
749union bna_rxq_u {
750 struct {
751 struct bna_rxq *hdr;
752 struct bna_rxq *data;
753 } hds;
754 struct {
755 struct bna_rxq *small;
756 struct bna_rxq *large;
757 } slr;
758 struct {
759 struct bna_rxq *only;
760 struct bna_rxq *reserved;
761 } single;
762};
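
/*
 * Which member of the union is valid follows enum bna_rxp_type:
 * BNA_RXP_HDS pairs a header queue with a data queue (.hds),
 * BNA_RXP_SLR pairs a small-buffer queue with a large-buffer queue (.slr),
 * and BNA_RXP_SINGLE uses one queue (.single).
 */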
763
764/* Packet rate for Dynamic Interrupt Moderation */
765struct bna_pkt_rate {
766 u32 small_pkt_cnt;
767 u32 large_pkt_cnt;
768};
769
770/* Completion control structure */
771struct bna_ccb {
772 /* Fast path */
773 void **sw_qpt;
774 u32 producer_index;
775 volatile u32 *hw_producer_index;
776 u32 q_depth;
777 struct bna_ib_dbell *i_dbell;
778 struct bna_rcb *rcb[2];
779 void *ctrl; /* For bnad */
780 struct bna_pkt_rate pkt_rate;
781 int page_idx;
782 int page_count;
783
784 /* Control path */
785 struct bna_cq *cq;
786 struct bnad *bnad;
787 enum bna_intr_type intr_type;
788 int intr_vector;
789 u8 rx_coalescing_timeo; /* For NAPI */
790 int id;
791 char name[BNA_Q_NAME_SIZE];
792};
793
794/* CQ QPT, configuration */
795struct bna_cq {
796 int cq_id;
797
798 struct bna_qpt qpt;
799 struct bna_ccb *ccb;
800
801 struct bna_ib *ib;
802 u8 ib_seg_offset;
803
804 struct bna_rx *rx;
805};
806
807struct bna_rss_config {
808 enum rss_hash_type hash_type;
809 u8 hash_mask;
810 u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
811};
812
813struct bna_hds_config {
814 enum hds_header_type hdr_type;
815 int header_size;
816};
817
818/* This structure is used during RX creation */
819struct bna_rx_config {
820 enum bna_rx_type rx_type;
821 int num_paths;
822 enum bna_rxp_type rxp_type;
823 int paused;
824 int q_depth;
825 /*
826 * Small/Large (or Header/Data) buffer size to be configured
827 * for SLR and HDS queue type. Large buffer size comes from
828 * port->mtu.
829 */
830 int small_buff_size;
831
832 enum bna_status rss_status;
833 struct bna_rss_config rss_config;
834
835 enum bna_status hds_status;
836 struct bna_hds_config hds_config;
837
838 enum bna_status vlan_strip_status;
839};
840
841/* Rx Path structure - one per MSIX vector/CPU */
842struct bna_rxp {
843 /* This should be the first one */
844 struct list_head qe;
845
846 enum bna_rxp_type type;
847 union bna_rxq_u rxq;
848 struct bna_cq cq;
849
850 struct bna_rx *rx;
851
852 /* MSI-x vector number for configuring RSS */
853 int vector;
854
855 struct bna_mbox_qe mbox_qe;
856};
857
858/* HDS configuration structure */
859struct bna_rxf_hds {
860 enum hds_header_type hdr_type;
861 int header_size;
862};
863
864/* RSS configuration structure */
865struct bna_rxf_rss {
866 enum rss_hash_type hash_type;
867 u8 hash_mask;
868 u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
869};
870
871/* RxF structure (hardware Rx Function) */
872struct bna_rxf {
873 bfa_fsm_t fsm;
874 int rxf_id;
875 enum rxf_flags ctrl_flags;
876 u16 default_vlan_tag;
877 enum bna_rxf_oper_state rxf_oper_state;
878 enum bna_status hds_status;
879 struct bna_rxf_hds hds_cfg;
880 enum bna_status rss_status;
881 struct bna_rxf_rss rss_cfg;
882 struct bna_rit_segment *rit_segment;
883 struct bna_rx *rx;
884 u32 forced_offset;
885 struct bna_mbox_qe mbox_qe;
886 int mcast_rxq_id;
887
888 /* callback for bna_rxf_start() */
889 void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
890 struct bna_rx *start_cbarg;
891
892 /* callback for bna_rxf_stop() */
893 void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
894 struct bna_rx *stop_cbarg;
895
896 /* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
897 void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
898 enum bna_cb_status status);
899 struct bnad *oper_state_cbarg;
900
901 /**
902 * callback for:
903 * bna_rxf_ucast_set()
904 * bna_rxf_{ucast/mcast}_add(),
905 * bna_rxf_{ucast/mcast}_del(),
906 * bna_rxf_mode_set()
907 */
908 void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
909 enum bna_cb_status status);
910 struct bnad *cam_fltr_cbarg;
911
912 enum bna_rxf_flags rxf_flags;
913
914 /* List of unicast addresses yet to be applied to h/w */
915 struct list_head ucast_pending_add_q;
916 struct list_head ucast_pending_del_q;
917 int ucast_pending_set;
918 /* ucast addresses applied to the h/w */
919 struct list_head ucast_active_q;
920 struct bna_mac *ucast_active_mac;
921
922 /* List of multicast addresses yet to be applied to h/w */
923 struct list_head mcast_pending_add_q;
924 struct list_head mcast_pending_del_q;
925 /* multicast addresses applied to the h/w */
926 struct list_head mcast_active_q;
927
928 /* Rx modes yet to be applied to h/w */
929 enum bna_rxmode rxmode_pending;
930 enum bna_rxmode rxmode_pending_bitmask;
931 /* Rx modes applied to h/w */
932 enum bna_rxmode rxmode_active;
933
934 enum bna_status vlan_filter_status;
935 u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
936};
937
938/* Rx object */
939struct bna_rx {
940 /* This should be the first one */
941 struct list_head qe;
942
943 bfa_fsm_t fsm;
944
945 enum bna_rx_type type;
946
947 /* list-head for RX path objects */
948 struct list_head rxp_q;
949
950 struct bna_rxf rxf;
951
952 enum bna_rx_flags rx_flags;
953
954 struct bna_mbox_qe mbox_qe;
955
956 struct bfa_wc rxq_stop_wc;
957
958 /* Rx event handlers */
959 void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
960 void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
961 void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
962 void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
963 void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
964 void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
965
966 /* callback for bna_rx_disable(), bna_rx_stop() */
967 void (*stop_cbfn)(void *arg, struct bna_rx *rx,
968 enum bna_cb_status status);
969 void *stop_cbarg;
970
971 struct bna *bna;
972 void *priv; /* bnad's cookie */
973};
974
975struct bna_rx_event_cbfn {
976 /* Optional */
977 void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
978 void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
979 void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
980 void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
981 /* Mandatory */
982 void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
983 void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
984};
985
986/* Rx module - keeps track of free, active rx objects */
987struct bna_rx_mod {
988 struct bna *bna; /* back pointer to parent */
989 struct bna_rx *rx; /* BFI_MAX_RXQ entries */
990 struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */
991 struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
992
993 struct list_head rx_free_q;
994 struct list_head rx_active_q;
995 int rx_free_count;
996
997 struct list_head rxp_free_q;
998 int rxp_free_count;
999
1000 struct list_head rxq_free_q;
1001 int rxq_free_count;
1002
1003 enum bna_rx_mod_flags flags;
1004
1005 /* callback for bna_rx_mod_stop() */
1006 void (*stop_cbfn)(struct bna_port *port,
1007 enum bna_cb_status status);
1008
1009 struct bfa_wc rx_stop_wc;
1010 u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
1011 u32 rxf_bmap[2];
1012};
1013
1014/**
1015 *
1016 * CAM
1017 *
1018 */
1019
1020struct bna_ucam_mod {
1021 struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
1022 struct list_head free_q;
1023
1024 struct bna *bna;
1025};
1026
1027struct bna_mcam_mod {
1028 struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
1029 struct list_head free_q;
1030
1031 struct bna *bna;
1032};
1033
1034/**
1035 *
1036 * Statistics
1037 *
1038 */
1039
1040struct bna_tx_stats {
1041 int tx_state;
1042 int tx_flags;
1043 int num_txqs;
1044 u32 txq_bmap[2];
1045 int txf_id;
1046};
1047
1048struct bna_rx_stats {
1049 int rx_state;
1050 int rx_flags;
1051 int num_rxps;
1052 int num_rxqs;
1053 u32 rxq_bmap[2];
1054 u32 cq_bmap[2];
1055 int rxf_id;
1056 int rxf_state;
1057 int rxf_oper_state;
1058 int num_active_ucast;
1059 int num_active_mcast;
1060 int rxmode_active;
1061 int vlan_filter_status;
1062 u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
1063 int rss_status;
1064 int hds_status;
1065};
1066
1067struct bna_sw_stats {
1068 int device_state;
1069 int port_state;
1070 int port_flags;
1071 int llport_state;
1072 int priority;
1073 int num_active_tx;
1074 int num_active_rx;
1075 struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
1076 struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
1077};
1078
1079struct bna_stats {
1080 u32 txf_bmap[2];
1081 u32 rxf_bmap[2];
1082 struct bfi_ll_stats *hw_stats;
1083 struct bna_sw_stats *sw_stats;
1084};
1085
1086/**
1087 *
1088 * BNA
1089 *
1090 */
1091
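/*
 * Top-level BNA instance: aggregates the IOC/device, the link-layer port,
 * the Tx, Rx and IB modules, the unicast/multicast CAM modules, the RIT
 * and the mailbox machinery. `bnad` points back at the Linux driver
 * instance that owns this object.
 */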
1092struct bna {
1093 struct bfa_pcidev pcidev;
1094
1095 int port_num;
1096
1097 struct bna_chip_regs regs;
1098
1099 struct bna_dma_addr hw_stats_dma;
1100 struct bna_stats stats;
1101
1102 struct bna_device device;
1103 struct bfa_cee cee;
1104
1105 struct bna_mbox_mod mbox_mod;
1106
1107 struct bna_port port;
1108
1109 struct bna_tx_mod tx_mod;
1110
1111 struct bna_rx_mod rx_mod;
1112
1113 struct bna_ib_mod ib_mod;
1114
1115 struct bna_ucam_mod ucam_mod;
1116 struct bna_mcam_mod mcam_mod;
1117
1118 struct bna_rit_mod rit_mod;
1119
1120 int rxf_promisc_id;
1121
1122 struct bna_mbox_qe mbox_qe;
1123
1124 struct bnad *bnad;
1125};
1126
1127#endif /* __BNA_TYPES_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
new file mode 100644
index 000000000000..8e35b2596f93
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -0,0 +1,3266 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include <linux/bitops.h>
19#include <linux/netdevice.h>
20#include <linux/skbuff.h>
21#include <linux/etherdevice.h>
22#include <linux/in.h>
23#include <linux/ethtool.h>
24#include <linux/if_vlan.h>
25#include <linux/if_ether.h>
26#include <linux/ip.h>
27#include <linux/prefetch.h>
28
29#include "bnad.h"
30#include "bna.h"
31#include "cna.h"
32
33static DEFINE_MUTEX(bnad_fwimg_mutex);
34
35/*
36 * Module params
37 */
38static uint bnad_msix_disable;
39module_param(bnad_msix_disable, uint, 0444);
40MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
41
42static uint bnad_ioc_auto_recover = 1;
43module_param(bnad_ioc_auto_recover, uint, 0444);
44MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
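
/*
 * Example (assuming the module is built as bna.ko):
 *
 *	modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 *
 * forces INTx mode and disables IOC auto recovery. Both parameters use
 * permission 0444, so they are read-only once the module is loaded.
 */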
45
46/*
47 * Global variables
48 */
49u32 bnad_rxqs_per_cq = 2;
50
51static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
52
53/*
54 * Local MACROS
55 */
56#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
57
58#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
59
60#define BNAD_GET_MBOX_IRQ(_bnad) \
61 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
62 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
63 ((_bnad)->pcidev->irq))
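
/*
 * Resolves to the MSI-X mailbox vector when MSI-X is enabled, or to the
 * PCI device's legacy IRQ otherwise; used for example by
 * bnad_mbox_irq_free() below:
 *
 *	irq = BNAD_GET_MBOX_IRQ(bnad);
 *	free_irq(irq, bnad);
 */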
64
65#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
66do { \
67 (_res_info)->res_type = BNA_RES_T_MEM; \
68 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
69 (_res_info)->res_u.mem_info.num = (_num); \
70 (_res_info)->res_u.mem_info.len = \
71 sizeof(struct bnad_unmap_q) + \
72 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
73} while (0)
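
/*
 * Sketch of intended use (the real callers are in the Tx/Rx setup paths
 * further down in this file): request kernel-virtual memory for `_num`
 * unmap queues, each holding `_depth` bnad_skb_unmap entries, e.g.
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *			bnad->num_txq_per_tx, BNAD_TX_UNMAPQ_DEPTH);
 */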
74
75#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
76
77/*
78 * Reinitialize completions in CQ, once Rx is taken down
79 */
80static void
81bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
82{
83 struct bna_cq_entry *cmpl, *next_cmpl;
84 unsigned int wi_range, wis = 0, ccb_prod = 0;
85 int i;
86
87 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
88 wi_range);
89
90 for (i = 0; i < ccb->q_depth; i++) {
91 wis++;
92 if (likely(--wi_range))
93 next_cmpl = cmpl + 1;
94 else {
95 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
96 wis = 0;
97 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
98 next_cmpl, wi_range);
99 }
100 cmpl->valid = 0;
101 cmpl = next_cmpl;
102 }
103}
104
105/*
106 * Frees all pending Tx Bufs
107 * At this point no activity is expected on the Q,
108 * so DMA unmap & freeing is fine.
109 */
110static void
111bnad_free_all_txbufs(struct bnad *bnad,
112 struct bna_tcb *tcb)
113{
114 u32 unmap_cons;
115 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
116 struct bnad_skb_unmap *unmap_array;
117 struct sk_buff *skb = NULL;
118 int i;
119
120 unmap_array = unmap_q->unmap_array;
121
122 unmap_cons = 0;
123 while (unmap_cons < unmap_q->q_depth) {
124 skb = unmap_array[unmap_cons].skb;
125 if (!skb) {
126 unmap_cons++;
127 continue;
128 }
129 unmap_array[unmap_cons].skb = NULL;
130
131 dma_unmap_single(&bnad->pcidev->dev,
132 dma_unmap_addr(&unmap_array[unmap_cons],
133 dma_addr), skb_headlen(skb),
134 DMA_TO_DEVICE);
135
136 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
137 if (++unmap_cons >= unmap_q->q_depth)
138 break;
139
140 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
141 dma_unmap_page(&bnad->pcidev->dev,
142 dma_unmap_addr(&unmap_array[unmap_cons],
143 dma_addr),
144 skb_shinfo(skb)->frags[i].size,
145 DMA_TO_DEVICE);
146 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
147 0);
148 if (++unmap_cons >= unmap_q->q_depth)
149 break;
150 }
151 dev_kfree_skb_any(skb);
152 }
153}
154
155/* Data Path Handlers */
156
157/*
158 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
159 * Can be called in a) Interrupt context
160 * b) Sending context
161 * c) Tasklet context
162 */
163static u32
164bnad_free_txbufs(struct bnad *bnad,
165 struct bna_tcb *tcb)
166{
167 u32 sent_packets = 0, sent_bytes = 0;
168 u16 wis, unmap_cons, updated_hw_cons;
169 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
170 struct bnad_skb_unmap *unmap_array;
171 struct sk_buff *skb;
172 int i;
173
174 /*
175	 * Just return if TX is stopped. This check is useful
176	 * when bnad_free_txbufs() runs from a tasklet that was
177	 * scheduled before bnad_cb_tx_cleanup() cleared the
178	 * BNAD_TXQ_TX_STARTED bit, but actually executes after
179	 * the cleanup has completed.
180 */
181 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
182 return 0;
183
184 updated_hw_cons = *(tcb->hw_consumer_index);
185
186 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
187 updated_hw_cons, tcb->q_depth);
188
189 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
190
191 unmap_array = unmap_q->unmap_array;
192 unmap_cons = unmap_q->consumer_index;
193
194 prefetch(&unmap_array[unmap_cons + 1]);
195 while (wis) {
196 skb = unmap_array[unmap_cons].skb;
197
198 unmap_array[unmap_cons].skb = NULL;
199
200 sent_packets++;
201 sent_bytes += skb->len;
202 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
203
204 dma_unmap_single(&bnad->pcidev->dev,
205 dma_unmap_addr(&unmap_array[unmap_cons],
206 dma_addr), skb_headlen(skb),
207 DMA_TO_DEVICE);
208 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
209 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
210
211 prefetch(&unmap_array[unmap_cons + 1]);
212 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
213 prefetch(&unmap_array[unmap_cons + 1]);
214
215 dma_unmap_page(&bnad->pcidev->dev,
216 dma_unmap_addr(&unmap_array[unmap_cons],
217 dma_addr),
218 skb_shinfo(skb)->frags[i].size,
219 DMA_TO_DEVICE);
220 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
221 0);
222 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
223 }
224 dev_kfree_skb_any(skb);
225 }
226
227 /* Update consumer pointers. */
228 tcb->consumer_index = updated_hw_cons;
229 unmap_q->consumer_index = unmap_cons;
230
231 tcb->txq->tx_packets += sent_packets;
232 tcb->txq->tx_bytes += sent_bytes;
233
234 return sent_packets;
235}
236
237/* Tx Free Tasklet function */
238/* Frees completed Tx buffers for all tcb's across all Tx objects */
239/*
240 * Scheduled from sending context, so that
241 * the fat Tx lock is not held for too long
242 * in the sending context.
243 */
244static void
245bnad_tx_free_tasklet(unsigned long bnad_ptr)
246{
247 struct bnad *bnad = (struct bnad *)bnad_ptr;
248 struct bna_tcb *tcb;
249 u32 acked = 0;
250 int i, j;
251
252 for (i = 0; i < bnad->num_tx; i++) {
253 for (j = 0; j < bnad->num_txq_per_tx; j++) {
254 tcb = bnad->tx_info[i].tcb[j];
255 if (!tcb)
256 continue;
257 if (((u16) (*tcb->hw_consumer_index) !=
258 tcb->consumer_index) &&
259 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
260 &tcb->flags))) {
261 acked = bnad_free_txbufs(bnad, tcb);
262 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
263 &tcb->flags)))
264 bna_ib_ack(tcb->i_dbell, acked);
265 smp_mb__before_clear_bit();
266 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
267 }
268 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
269 &tcb->flags)))
270 continue;
271 if (netif_queue_stopped(bnad->netdev)) {
272 if (acked && netif_carrier_ok(bnad->netdev) &&
273 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
274 BNAD_NETIF_WAKE_THRESHOLD) {
275 netif_wake_queue(bnad->netdev);
276 /* TODO */
277 /* Counters for individual TxQs? */
278 BNAD_UPDATE_CTR(bnad,
279 netif_queue_wakeup);
280 }
281 }
282 }
283 }
284}
285
286static u32
287bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
288{
289 struct net_device *netdev = bnad->netdev;
290 u32 sent = 0;
291
292 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
293 return 0;
294
295 sent = bnad_free_txbufs(bnad, tcb);
296 if (sent) {
297 if (netif_queue_stopped(netdev) &&
298 netif_carrier_ok(netdev) &&
299 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
300 BNAD_NETIF_WAKE_THRESHOLD) {
301 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
302 netif_wake_queue(netdev);
303 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
304 }
305 }
306 }
307
308 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
309 bna_ib_ack(tcb->i_dbell, sent);
310
311 smp_mb__before_clear_bit();
312 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
313
314 return sent;
315}
316
317/* MSIX Tx Completion Handler */
318static irqreturn_t
319bnad_msix_tx(int irq, void *data)
320{
321 struct bna_tcb *tcb = (struct bna_tcb *)data;
322 struct bnad *bnad = tcb->bnad;
323
324 bnad_tx(bnad, tcb);
325
326 return IRQ_HANDLED;
327}
328
329static void
330bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
331{
332 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
333
334 rcb->producer_index = 0;
335 rcb->consumer_index = 0;
336
337 unmap_q->producer_index = 0;
338 unmap_q->consumer_index = 0;
339}
340
341static void
342bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
343{
344 struct bnad_unmap_q *unmap_q;
345 struct bnad_skb_unmap *unmap_array;
346 struct sk_buff *skb;
347 int unmap_cons;
348
349 unmap_q = rcb->unmap_q;
350 unmap_array = unmap_q->unmap_array;
351 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
352 skb = unmap_array[unmap_cons].skb;
353 if (!skb)
354 continue;
355 unmap_array[unmap_cons].skb = NULL;
356 dma_unmap_single(&bnad->pcidev->dev,
357 dma_unmap_addr(&unmap_array[unmap_cons],
358 dma_addr),
359 rcb->rxq->buffer_size,
360 DMA_FROM_DEVICE);
361 dev_kfree_skb(skb);
362 }
363 bnad_reset_rcb(bnad, rcb);
364}
365
366static void
367bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
368{
369 u16 to_alloc, alloced, unmap_prod, wi_range;
370 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
371 struct bnad_skb_unmap *unmap_array;
372 struct bna_rxq_entry *rxent;
373 struct sk_buff *skb;
374 dma_addr_t dma_addr;
375
376 alloced = 0;
377 to_alloc =
378 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
379
380 unmap_array = unmap_q->unmap_array;
381 unmap_prod = unmap_q->producer_index;
382
383 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
384
385 while (to_alloc--) {
386 if (!wi_range) {
387 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
388 wi_range);
389 }
390 skb = netdev_alloc_skb_ip_align(bnad->netdev,
391 rcb->rxq->buffer_size);
392 if (unlikely(!skb)) {
393 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
394 goto finishing;
395 }
396 unmap_array[unmap_prod].skb = skb;
397 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
398 rcb->rxq->buffer_size,
399 DMA_FROM_DEVICE);
400 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
401 dma_addr);
402 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
403 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
404
405 rxent++;
406 wi_range--;
407 alloced++;
408 }
409
410finishing:
411 if (likely(alloced)) {
412 unmap_q->producer_index = unmap_prod;
413 rcb->producer_index = unmap_prod;
414 smp_mb();
415 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
416 bna_rxq_prod_indx_doorbell(rcb);
417 }
418}
419
420static inline void
421bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
422{
423 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
424
425 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
426 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
427 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
428 bnad_alloc_n_post_rxbufs(bnad, rcb);
429 smp_mb__before_clear_bit();
430 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
431 }
432}
433
434static u32
435bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
436{
437 struct bna_cq_entry *cmpl, *next_cmpl;
438 struct bna_rcb *rcb = NULL;
439 unsigned int wi_range, packets = 0, wis = 0;
440 struct bnad_unmap_q *unmap_q;
441 struct bnad_skb_unmap *unmap_array;
442 struct sk_buff *skb;
443 u32 flags, unmap_cons;
444 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
445 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
446
447 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
448 return 0;
449
450 prefetch(bnad->netdev);
451 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
452 wi_range);
453 BUG_ON(!(wi_range <= ccb->q_depth));
454 while (cmpl->valid && packets < budget) {
455 packets++;
456 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
457
458 if (qid0 == cmpl->rxq_id)
459 rcb = ccb->rcb[0];
460 else
461 rcb = ccb->rcb[1];
462
463 unmap_q = rcb->unmap_q;
464 unmap_array = unmap_q->unmap_array;
465 unmap_cons = unmap_q->consumer_index;
466
467 skb = unmap_array[unmap_cons].skb;
468 BUG_ON(!(skb));
469 unmap_array[unmap_cons].skb = NULL;
470 dma_unmap_single(&bnad->pcidev->dev,
471 dma_unmap_addr(&unmap_array[unmap_cons],
472 dma_addr),
473 rcb->rxq->buffer_size,
474 DMA_FROM_DEVICE);
475 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
476
477 /* Should be more efficient ? Performance ? */
478 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
479
480 wis++;
481 if (likely(--wi_range))
482 next_cmpl = cmpl + 1;
483 else {
484 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
485 wis = 0;
486 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
487 next_cmpl, wi_range);
488 BUG_ON(!(wi_range <= ccb->q_depth));
489 }
490 prefetch(next_cmpl);
491
492 flags = ntohl(cmpl->flags);
493 if (unlikely
494 (flags &
495 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
496 BNA_CQ_EF_TOO_LONG))) {
497 dev_kfree_skb_any(skb);
498 rcb->rxq->rx_packets_with_error++;
499 goto next;
500 }
501
502 skb_put(skb, ntohs(cmpl->length));
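		/*
		 * Trust the hardware checksum only when L3 was validated
		 * (IPv4 with L3_CKSUM_OK, or IPv6) and the TCP/UDP L4
		 * checksum was validated; otherwise leave verification to
		 * the stack (CHECKSUM_NONE).
		 */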
503 if (likely
504 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
505 (((flags & BNA_CQ_EF_IPV4) &&
506 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
507 (flags & BNA_CQ_EF_IPV6)) &&
508 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
509 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
510 skb->ip_summed = CHECKSUM_UNNECESSARY;
511 else
512 skb_checksum_none_assert(skb);
513
514 rcb->rxq->rx_packets++;
515 rcb->rxq->rx_bytes += skb->len;
516 skb->protocol = eth_type_trans(skb, bnad->netdev);
517
518 if (flags & BNA_CQ_EF_VLAN)
519 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
520
521 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
522 struct bnad_rx_ctrl *rx_ctrl;
523
524 rx_ctrl = (struct bnad_rx_ctrl *) ccb->ctrl;
525 napi_gro_receive(&rx_ctrl->napi, skb);
526 } else {
527 netif_receive_skb(skb);
528 }
529
530next:
531 cmpl->valid = 0;
532 cmpl = next_cmpl;
533 }
534
535 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
536
537 if (likely(ccb)) {
538 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
539 bna_ib_ack(ccb->i_dbell, packets);
540 bnad_refill_rxq(bnad, ccb->rcb[0]);
541 if (ccb->rcb[1])
542 bnad_refill_rxq(bnad, ccb->rcb[1]);
543 } else {
544 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
545 bna_ib_ack(ccb->i_dbell, 0);
546 }
547
548 return packets;
549}
550
551static void
552bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
553{
554 if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
555 return;
556
557 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
558 bna_ib_ack(ccb->i_dbell, 0);
559}
560
561static void
562bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
563{
564 unsigned long flags;
565
566 /* Because of polling context */
567 spin_lock_irqsave(&bnad->bna_lock, flags);
568 bnad_enable_rx_irq_unsafe(ccb);
569 spin_unlock_irqrestore(&bnad->bna_lock, flags);
570}
571
572static void
573bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
574{
575 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
576 struct napi_struct *napi = &rx_ctrl->napi;
577
578 if (likely(napi_schedule_prep(napi))) {
579 bnad_disable_rx_irq(bnad, ccb);
580 __napi_schedule(napi);
581 }
582 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
583}
584
585/* MSIX Rx Path Handler */
586static irqreturn_t
587bnad_msix_rx(int irq, void *data)
588{
589 struct bna_ccb *ccb = (struct bna_ccb *)data;
590 struct bnad *bnad = ccb->bnad;
591
592 bnad_netif_rx_schedule_poll(bnad, ccb);
593
594 return IRQ_HANDLED;
595}
596
597/* Interrupt handlers */
598
599/* Mbox Interrupt Handlers */
600static irqreturn_t
601bnad_msix_mbox_handler(int irq, void *data)
602{
603 u32 intr_status;
604 unsigned long flags;
605 struct bnad *bnad = (struct bnad *)data;
606
607 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
608 return IRQ_HANDLED;
609
610 spin_lock_irqsave(&bnad->bna_lock, flags);
611
612 bna_intr_status_get(&bnad->bna, intr_status);
613
614 if (BNA_IS_MBOX_ERR_INTR(intr_status))
615 bna_mbox_handler(&bnad->bna, intr_status);
616
617 spin_unlock_irqrestore(&bnad->bna_lock, flags);
618
619 return IRQ_HANDLED;
620}
621
622static irqreturn_t
623bnad_isr(int irq, void *data)
624{
625 int i, j;
626 u32 intr_status;
627 unsigned long flags;
628 struct bnad *bnad = (struct bnad *)data;
629 struct bnad_rx_info *rx_info;
630 struct bnad_rx_ctrl *rx_ctrl;
631
632 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
633 return IRQ_NONE;
634
635 bna_intr_status_get(&bnad->bna, intr_status);
636
637 if (unlikely(!intr_status))
638 return IRQ_NONE;
639
640 spin_lock_irqsave(&bnad->bna_lock, flags);
641
642 if (BNA_IS_MBOX_ERR_INTR(intr_status))
643 bna_mbox_handler(&bnad->bna, intr_status);
644
645 spin_unlock_irqrestore(&bnad->bna_lock, flags);
646
647 if (!BNA_IS_INTX_DATA_INTR(intr_status))
648 return IRQ_HANDLED;
649
650 /* Process data interrupts */
651 /* Tx processing */
652 for (i = 0; i < bnad->num_tx; i++) {
653 for (j = 0; j < bnad->num_txq_per_tx; j++)
654 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
655 }
656 /* Rx processing */
657 for (i = 0; i < bnad->num_rx; i++) {
658 rx_info = &bnad->rx_info[i];
659 if (!rx_info->rx)
660 continue;
661 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
662 rx_ctrl = &rx_info->rx_ctrl[j];
663 if (rx_ctrl->ccb)
664 bnad_netif_rx_schedule_poll(bnad,
665 rx_ctrl->ccb);
666 }
667 }
668 return IRQ_HANDLED;
669}
670
671/*
672 * Called in interrupt / callback context
673 * with bna_lock held, so cfg_flags access is OK
674 */
675static void
676bnad_enable_mbox_irq(struct bnad *bnad)
677{
678 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
679
680 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
681}
682
683/*
684 * Called with bnad->bna_lock held because of
685 * bnad->cfg_flags access.
686 */
687static void
688bnad_disable_mbox_irq(struct bnad *bnad)
689{
690 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
691
692 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
693}
694
695static void
696bnad_set_netdev_perm_addr(struct bnad *bnad)
697{
698 struct net_device *netdev = bnad->netdev;
699
700 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
701 if (is_zero_ether_addr(netdev->dev_addr))
702 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
703}
704
705/* Control Path Handlers */
706
707/* Callbacks */
708void
709bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
710{
711 bnad_enable_mbox_irq(bnad);
712}
713
714void
715bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
716{
717 bnad_disable_mbox_irq(bnad);
718}
719
720void
721bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
722{
723 complete(&bnad->bnad_completions.ioc_comp);
724 bnad->bnad_completions.ioc_comp_status = status;
725}
726
727void
728bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
729{
730 complete(&bnad->bnad_completions.ioc_comp);
731 bnad->bnad_completions.ioc_comp_status = status;
732}
733
734static void
735bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
736{
737 struct bnad *bnad = (struct bnad *)arg;
738
739 complete(&bnad->bnad_completions.port_comp);
740
741 netif_carrier_off(bnad->netdev);
742}
743
744void
745bnad_cb_port_link_status(struct bnad *bnad,
746 enum bna_link_status link_status)
747{
748 bool link_up = 0;
749
750 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
751
752 if (link_status == BNA_CEE_UP) {
753 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
754 BNAD_UPDATE_CTR(bnad, cee_up);
755 } else
756 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
757
758 if (link_up) {
759 if (!netif_carrier_ok(bnad->netdev)) {
760 struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
761 if (!tcb)
762 return;
763 pr_warn("bna: %s link up\n",
764 bnad->netdev->name);
765 netif_carrier_on(bnad->netdev);
766 BNAD_UPDATE_CTR(bnad, link_toggle);
767 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
768 /* Force an immediate Transmit Schedule */
769 pr_info("bna: %s TX_STARTED\n",
770 bnad->netdev->name);
771 netif_wake_queue(bnad->netdev);
772 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
773 } else {
774 netif_stop_queue(bnad->netdev);
775 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
776 }
777 }
778 } else {
779 if (netif_carrier_ok(bnad->netdev)) {
780 pr_warn("bna: %s link down\n",
781 bnad->netdev->name);
782 netif_carrier_off(bnad->netdev);
783 BNAD_UPDATE_CTR(bnad, link_toggle);
784 }
785 }
786}
787
788static void
789bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
790 enum bna_cb_status status)
791{
792 struct bnad *bnad = (struct bnad *)arg;
793
794 complete(&bnad->bnad_completions.tx_comp);
795}
796
797static void
798bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
799{
800 struct bnad_tx_info *tx_info =
801 (struct bnad_tx_info *)tcb->txq->tx->priv;
802 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
803
804 tx_info->tcb[tcb->id] = tcb;
805 unmap_q->producer_index = 0;
806 unmap_q->consumer_index = 0;
807 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
808}
809
810static void
811bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
812{
813 struct bnad_tx_info *tx_info =
814 (struct bnad_tx_info *)tcb->txq->tx->priv;
815 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
816
817 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
818 cpu_relax();
819
820 bnad_free_all_txbufs(bnad, tcb);
821
822 unmap_q->producer_index = 0;
823 unmap_q->consumer_index = 0;
824
825 smp_mb__before_clear_bit();
826 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
827
828 tx_info->tcb[tcb->id] = NULL;
829}
830
831static void
832bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
833{
834 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
835
836 unmap_q->producer_index = 0;
837 unmap_q->consumer_index = 0;
838 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
839}
840
841static void
842bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
843{
844 bnad_free_all_rxbufs(bnad, rcb);
845}
846
847static void
848bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
849{
850 struct bnad_rx_info *rx_info =
851 (struct bnad_rx_info *)ccb->cq->rx->priv;
852
853 rx_info->rx_ctrl[ccb->id].ccb = ccb;
854 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
855}
856
857static void
858bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
859{
860 struct bnad_rx_info *rx_info =
861 (struct bnad_rx_info *)ccb->cq->rx->priv;
862
863 rx_info->rx_ctrl[ccb->id].ccb = NULL;
864}
865
866static void
867bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
868{
869 struct bnad_tx_info *tx_info =
870 (struct bnad_tx_info *)tcb->txq->tx->priv;
871
872 if (tx_info != &bnad->tx_info[0])
873 return;
874
875 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
876 netif_stop_queue(bnad->netdev);
877 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
878}
879
880static void
881bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
882{
883 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
884
885 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
886 return;
887
888 clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
889
890 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
891 cpu_relax();
892
893 bnad_free_all_txbufs(bnad, tcb);
894
895 unmap_q->producer_index = 0;
896 unmap_q->consumer_index = 0;
897
898 smp_mb__before_clear_bit();
899 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
900
901 /*
902	 * Workaround: if the first device enable failed and left us
903	 * with a zero MAC address, try to fetch the MAC address
904	 * again here.
905 */
906 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
907 bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
908 bnad_set_netdev_perm_addr(bnad);
909 }
910
911 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
912
913 if (netif_carrier_ok(bnad->netdev)) {
914 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
915 netif_wake_queue(bnad->netdev);
916 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
917 }
918}
919
920static void
921bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
922{
923 /* Delay only once for the whole Tx Path Shutdown */
924 if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
925 mdelay(BNAD_TXRX_SYNC_MDELAY);
926}
927
928static void
929bnad_cb_rx_cleanup(struct bnad *bnad,
930 struct bna_ccb *ccb)
931{
932 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
933
934 if (ccb->rcb[1])
935 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
936
937 if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
938 mdelay(BNAD_TXRX_SYNC_MDELAY);
939}
940
941static void
942bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
943{
944 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
945
946 clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
947
948 if (rcb == rcb->cq->ccb->rcb[0])
949 bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
950
951 bnad_free_all_rxbufs(bnad, rcb);
952
953 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
954
955 /* Now allocate & post buffers for this RCB */
956 /* !!Allocation in callback context */
957 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
958 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
959 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
960 bnad_alloc_n_post_rxbufs(bnad, rcb);
961 smp_mb__before_clear_bit();
962 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
963 }
964}
965
966static void
967bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
968 enum bna_cb_status status)
969{
970 struct bnad *bnad = (struct bnad *)arg;
971
972 complete(&bnad->bnad_completions.rx_comp);
973}
974
975static void
976bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
977 enum bna_cb_status status)
978{
979 bnad->bnad_completions.mcast_comp_status = status;
980 complete(&bnad->bnad_completions.mcast_comp);
981}
982
983void
984bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
985 struct bna_stats *stats)
986{
987 if (status == BNA_CB_SUCCESS)
988 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
989
990 if (!netif_running(bnad->netdev) ||
991 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
992 return;
993
994 mod_timer(&bnad->stats_timer,
995 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
996}
997
998/* Resource allocation, free functions */
999
1000static void
1001bnad_mem_free(struct bnad *bnad,
1002 struct bna_mem_info *mem_info)
1003{
1004 int i;
1005 dma_addr_t dma_pa;
1006
1007 if (mem_info->mdl == NULL)
1008 return;
1009
1010 for (i = 0; i < mem_info->num; i++) {
1011 if (mem_info->mdl[i].kva != NULL) {
1012 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1013 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1014 dma_pa);
1015 dma_free_coherent(&bnad->pcidev->dev,
1016 mem_info->mdl[i].len,
1017 mem_info->mdl[i].kva, dma_pa);
1018 } else
1019 kfree(mem_info->mdl[i].kva);
1020 }
1021 }
1022 kfree(mem_info->mdl);
1023 mem_info->mdl = NULL;
1024}
1025
1026static int
1027bnad_mem_alloc(struct bnad *bnad,
1028 struct bna_mem_info *mem_info)
1029{
1030 int i;
1031 dma_addr_t dma_pa;
1032
1033 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1034 mem_info->mdl = NULL;
1035 return 0;
1036 }
1037
1038 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1039 GFP_KERNEL);
1040 if (mem_info->mdl == NULL)
1041 return -ENOMEM;
1042
1043 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1044 for (i = 0; i < mem_info->num; i++) {
1045 mem_info->mdl[i].len = mem_info->len;
1046 mem_info->mdl[i].kva =
1047 dma_alloc_coherent(&bnad->pcidev->dev,
1048 mem_info->len, &dma_pa,
1049 GFP_KERNEL);
1050
1051 if (mem_info->mdl[i].kva == NULL)
1052 goto err_return;
1053
1054 BNA_SET_DMA_ADDR(dma_pa,
1055 &(mem_info->mdl[i].dma));
1056 }
1057 } else {
1058 for (i = 0; i < mem_info->num; i++) {
1059 mem_info->mdl[i].len = mem_info->len;
1060 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1061 GFP_KERNEL);
1062 if (mem_info->mdl[i].kva == NULL)
1063 goto err_return;
1064 }
1065 }
1066
1067 return 0;
1068
1069err_return:
1070 bnad_mem_free(bnad, mem_info);
1071 return -ENOMEM;
1072}
1073
1074/* Free IRQ for Mailbox */
1075static void
1076bnad_mbox_irq_free(struct bnad *bnad,
1077 struct bna_intr_info *intr_info)
1078{
1079 int irq;
1080 unsigned long flags;
1081
1082 if (intr_info->idl == NULL)
1083 return;
1084
1085 spin_lock_irqsave(&bnad->bna_lock, flags);
1086 bnad_disable_mbox_irq(bnad);
1087 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1088
1089 irq = BNAD_GET_MBOX_IRQ(bnad);
1090 free_irq(irq, bnad);
1091
1092 kfree(intr_info->idl);
1093}
1094
1095/*
1096 * Allocates the mailbox IRQ but keeps it disabled.
1097 * It will be enabled once we get the mbox enable callback
1098 * from bna.
1099 */
1100static int
1101bnad_mbox_irq_alloc(struct bnad *bnad,
1102 struct bna_intr_info *intr_info)
1103{
1104 int err = 0;
1105 unsigned long irq_flags, flags;
1106 u32 irq;
1107 irq_handler_t irq_handler;
1108
1109 /* Mbox should use only 1 vector */
1110
1111 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1112 if (!intr_info->idl)
1113 return -ENOMEM;
1114
1115 spin_lock_irqsave(&bnad->bna_lock, flags);
1116 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1117 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1118 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1119 irq_flags = 0;
1120 intr_info->intr_type = BNA_INTR_T_MSIX;
1121 intr_info->idl[0].vector = BNAD_MAILBOX_MSIX_INDEX;
1122 } else {
1123 irq_handler = (irq_handler_t)bnad_isr;
1124 irq = bnad->pcidev->irq;
1125 irq_flags = IRQF_SHARED;
1126 intr_info->intr_type = BNA_INTR_T_INTX;
1127 }
1128
1129 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1130 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1131
1132 /*
1133 * Set the Mbox IRQ disable flag, so that the IRQ handler
1134 * called from request_irq() for SHARED IRQs does not execute
1135 */
1136 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1137
1138 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1139
1140 err = request_irq(irq, irq_handler, irq_flags,
1141 bnad->mbox_irq_name, bnad);
1142
1143 if (err) {
1144 kfree(intr_info->idl);
1145 intr_info->idl = NULL;
1146 }
1147
1148 return err;
1149}
1150
1151static void
1152bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1153{
1154 kfree(intr_info->idl);
1155 intr_info->idl = NULL;
1156}
1157
1158/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1159static int
1160bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1161 uint txrx_id, struct bna_intr_info *intr_info)
1162{
1163 int i, vector_start = 0;
1164 u32 cfg_flags;
1165 unsigned long flags;
1166
1167 spin_lock_irqsave(&bnad->bna_lock, flags);
1168 cfg_flags = bnad->cfg_flags;
1169 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1170
1171 if (cfg_flags & BNAD_CF_MSIX) {
1172 intr_info->intr_type = BNA_INTR_T_MSIX;
1173 intr_info->idl = kcalloc(intr_info->num,
1174 sizeof(struct bna_intr_descr),
1175 GFP_KERNEL);
1176 if (!intr_info->idl)
1177 return -ENOMEM;
1178
1179 switch (src) {
1180 case BNAD_INTR_TX:
1181 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1182 break;
1183
1184 case BNAD_INTR_RX:
1185 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1186 (bnad->num_tx * bnad->num_txq_per_tx) +
1187 txrx_id;
1188 break;
1189
1190 default:
1191 BUG();
1192 }
1193
1194 for (i = 0; i < intr_info->num; i++)
1195 intr_info->idl[i].vector = vector_start + i;
1196 } else {
1197 intr_info->intr_type = BNA_INTR_T_INTX;
1198 intr_info->num = 1;
1199 intr_info->idl = kcalloc(intr_info->num,
1200 sizeof(struct bna_intr_descr),
1201 GFP_KERNEL);
1202 if (!intr_info->idl)
1203 return -ENOMEM;
1204
1205 switch (src) {
1206 case BNAD_INTR_TX:
1207 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1208 break;
1209
1210 case BNAD_INTR_RX:
1211 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1212 break;
1213 }
1214 }
1215 return 0;
1216}
1217
1218/**
1219 * NOTE: Should be called for MSIX only
1220 * Unregisters Tx MSIX vector(s) from the kernel
1221 */
1222static void
1223bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1224 int num_txqs)
1225{
1226 int i;
1227 int vector_num;
1228
1229 for (i = 0; i < num_txqs; i++) {
1230 if (tx_info->tcb[i] == NULL)
1231 continue;
1232
1233 vector_num = tx_info->tcb[i]->intr_vector;
1234 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1235 }
1236}
1237
1238/**
1239 * NOTE: Should be called for MSIX only
1240 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1241 */
1242static int
1243bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1244 uint tx_id, int num_txqs)
1245{
1246 int i;
1247 int err;
1248 int vector_num;
1249
1250 for (i = 0; i < num_txqs; i++) {
1251 vector_num = tx_info->tcb[i]->intr_vector;
1252 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1253 tx_id + tx_info->tcb[i]->id);
1254 err = request_irq(bnad->msix_table[vector_num].vector,
1255 (irq_handler_t)bnad_msix_tx, 0,
1256 tx_info->tcb[i]->name,
1257 tx_info->tcb[i]);
1258 if (err)
1259 goto err_return;
1260 }
1261
1262 return 0;
1263
1264err_return:
1265 if (i > 0)
1266 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1267 return -1;
1268}
1269
1270/**
1271 * NOTE: Should be called for MSIX only
1272 * Unregisters Rx MSIX vector(s) from the kernel
1273 */
1274static void
1275bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1276 int num_rxps)
1277{
1278 int i;
1279 int vector_num;
1280
1281 for (i = 0; i < num_rxps; i++) {
1282 if (rx_info->rx_ctrl[i].ccb == NULL)
1283 continue;
1284
1285 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1286 free_irq(bnad->msix_table[vector_num].vector,
1287 rx_info->rx_ctrl[i].ccb);
1288 }
1289}
1290
1291/**
1292 * NOTE: Should be called for MSIX only
1293 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1294 */
1295static int
1296bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1297 uint rx_id, int num_rxps)
1298{
1299 int i;
1300 int err;
1301 int vector_num;
1302
1303 for (i = 0; i < num_rxps; i++) {
1304 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1305 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1306 bnad->netdev->name,
1307 rx_id + rx_info->rx_ctrl[i].ccb->id);
1308 err = request_irq(bnad->msix_table[vector_num].vector,
1309 (irq_handler_t)bnad_msix_rx, 0,
1310 rx_info->rx_ctrl[i].ccb->name,
1311 rx_info->rx_ctrl[i].ccb);
1312 if (err)
1313 goto err_return;
1314 }
1315
1316 return 0;
1317
1318err_return:
1319	if (i > 0)
1320		bnad_rx_msix_unregister(bnad, rx_info, i); /* frees vectors 0..i-1 */
1321 return -1;
1322}
1323
1324/* Free Tx object Resources */
1325static void
1326bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1327{
1328 int i;
1329
1330 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1331 if (res_info[i].res_type == BNA_RES_T_MEM)
1332 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1333 else if (res_info[i].res_type == BNA_RES_T_INTR)
1334 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1335 }
1336}
1337
1338/* Allocates memory and interrupt resources for Tx object */
1339static int
1340bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1341 uint tx_id)
1342{
1343 int i, err = 0;
1344
1345 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1346 if (res_info[i].res_type == BNA_RES_T_MEM)
1347 err = bnad_mem_alloc(bnad,
1348 &res_info[i].res_u.mem_info);
1349 else if (res_info[i].res_type == BNA_RES_T_INTR)
1350 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1351 &res_info[i].res_u.intr_info);
1352 if (err)
1353 goto err_return;
1354 }
1355 return 0;
1356
1357err_return:
1358 bnad_tx_res_free(bnad, res_info);
1359 return err;
1360}
1361
1362/* Free Rx object Resources */
1363static void
1364bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1365{
1366 int i;
1367
1368 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1369 if (res_info[i].res_type == BNA_RES_T_MEM)
1370 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1371 else if (res_info[i].res_type == BNA_RES_T_INTR)
1372 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1373 }
1374}
1375
1376/* Allocates memory and interrupt resources for Rx object */
1377static int
1378bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1379 uint rx_id)
1380{
1381 int i, err = 0;
1382
1383 /* All memory needs to be allocated before setup_ccbs */
1384 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1385 if (res_info[i].res_type == BNA_RES_T_MEM)
1386 err = bnad_mem_alloc(bnad,
1387 &res_info[i].res_u.mem_info);
1388 else if (res_info[i].res_type == BNA_RES_T_INTR)
1389 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1390 &res_info[i].res_u.intr_info);
1391 if (err)
1392 goto err_return;
1393 }
1394 return 0;
1395
1396err_return:
1397 bnad_rx_res_free(bnad, res_info);
1398 return err;
1399}
1400
1401/* Timer callbacks */
1402/* a) IOC timer */
1403static void
1404bnad_ioc_timeout(unsigned long data)
1405{
1406 struct bnad *bnad = (struct bnad *)data;
1407 unsigned long flags;
1408
1409 spin_lock_irqsave(&bnad->bna_lock, flags);
1410 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1411 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1412}
1413
1414static void
1415bnad_ioc_hb_check(unsigned long data)
1416{
1417 struct bnad *bnad = (struct bnad *)data;
1418 unsigned long flags;
1419
1420 spin_lock_irqsave(&bnad->bna_lock, flags);
1421 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423}
1424
1425static void
1426bnad_iocpf_timeout(unsigned long data)
1427{
1428 struct bnad *bnad = (struct bnad *)data;
1429 unsigned long flags;
1430
1431 spin_lock_irqsave(&bnad->bna_lock, flags);
1432 bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
1433 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1434}
1435
1436static void
1437bnad_iocpf_sem_timeout(unsigned long data)
1438{
1439 struct bnad *bnad = (struct bnad *)data;
1440 unsigned long flags;
1441
1442 spin_lock_irqsave(&bnad->bna_lock, flags);
1443 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
1444 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1445}
1446
1447/*
1448 * All timer routines use bnad->bna_lock to protect against
1449 * the following race, which may occur in case of no locking:
1450 * Time CPU m CPU n
1451 * 0 1 = test_bit
1452 * 1 clear_bit
1453 * 2 del_timer_sync
1454 * 3 mod_timer
1455 */
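/*
 * Sketch for illustration only (hypothetical names; the real code is
 * bnad_stats_timer_stop()/bnad_cleanup_rx() below): the stop side
 * clears the RUNNING bit under bna_lock and only then synchronizes:
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	stop = test_and_clear_bit(EXAMPLE_TIMER_RUNNING, &bnad->run_flags);
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *	if (stop)
 *		del_timer_sync(&bnad->example_timer);
 *
 * Since the callback re-arms itself with mod_timer() only while it
 * sees the bit set (under the same lock), the timer cannot be
 * re-armed once del_timer_sync() returns.
 */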
1456
1457/* b) Dynamic Interrupt Moderation Timer */
1458static void
1459bnad_dim_timeout(unsigned long data)
1460{
1461 struct bnad *bnad = (struct bnad *)data;
1462 struct bnad_rx_info *rx_info;
1463 struct bnad_rx_ctrl *rx_ctrl;
1464 int i, j;
1465 unsigned long flags;
1466
1467 if (!netif_carrier_ok(bnad->netdev))
1468 return;
1469
1470 spin_lock_irqsave(&bnad->bna_lock, flags);
1471 for (i = 0; i < bnad->num_rx; i++) {
1472 rx_info = &bnad->rx_info[i];
1473 if (!rx_info->rx)
1474 continue;
1475 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1476 rx_ctrl = &rx_info->rx_ctrl[j];
1477 if (!rx_ctrl->ccb)
1478 continue;
1479 bna_rx_dim_update(rx_ctrl->ccb);
1480 }
1481 }
1482
1483	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1484 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1485 mod_timer(&bnad->dim_timer,
1486 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1487 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1488}
1489
1490/* c) Statistics Timer */
1491static void
1492bnad_stats_timeout(unsigned long data)
1493{
1494 struct bnad *bnad = (struct bnad *)data;
1495 unsigned long flags;
1496
1497 if (!netif_running(bnad->netdev) ||
1498 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1499 return;
1500
1501 spin_lock_irqsave(&bnad->bna_lock, flags);
1502 bna_stats_get(&bnad->bna);
1503 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1504}
1505
1506/*
1507 * Set up timer for DIM
1508 * Called with bnad->bna_lock held
1509 */
1510void
1511bnad_dim_timer_start(struct bnad *bnad)
1512{
1513 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1514 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1515 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1516 (unsigned long)bnad);
1517 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1518 mod_timer(&bnad->dim_timer,
1519 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1520 }
1521}
1522
1523/*
1524 * Set up timer for statistics
1525 * Called with mutex_lock(&bnad->conf_mutex) held
1526 */
1527static void
1528bnad_stats_timer_start(struct bnad *bnad)
1529{
1530 unsigned long flags;
1531
1532 spin_lock_irqsave(&bnad->bna_lock, flags);
1533 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1534 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1535 (unsigned long)bnad);
1536 mod_timer(&bnad->stats_timer,
1537 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1538 }
1539 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1540}
1541
1542/*
1543 * Stops the stats timer
1544 * Called with mutex_lock(&bnad->conf_mutex) held
1545 */
1546static void
1547bnad_stats_timer_stop(struct bnad *bnad)
1548{
1549 int to_del = 0;
1550 unsigned long flags;
1551
1552 spin_lock_irqsave(&bnad->bna_lock, flags);
1553 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1554 to_del = 1;
1555 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1556 if (to_del)
1557 del_timer_sync(&bnad->stats_timer);
1558}
1559
1560/* Utilities */
1561
1562static void
1563bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1564{
1565 int i = 1; /* Index 0 has broadcast address */
1566 struct netdev_hw_addr *mc_addr;
1567
1568 netdev_for_each_mc_addr(mc_addr, netdev) {
1569 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1570 ETH_ALEN);
1571 i++;
1572 }
1573}
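/*
 * Usage note (see bnad_set_rx_mode() below): the caller allocates
 * (netdev_mc_count(netdev) + 1) * ETH_ALEN bytes, stores the broadcast
 * address at index 0, and this helper fills indices 1..mc_count.
 */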
1574
1575static int
1576bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1577{
1578 struct bnad_rx_ctrl *rx_ctrl =
1579 container_of(napi, struct bnad_rx_ctrl, napi);
1580 struct bna_ccb *ccb;
1581 struct bnad *bnad;
1582 int rcvd = 0;
1583
1584 ccb = rx_ctrl->ccb;
1585
1586 bnad = ccb->bnad;
1587
1588 if (!netif_carrier_ok(bnad->netdev))
1589 goto poll_exit;
1590
1591 rcvd = bnad_poll_cq(bnad, ccb, budget);
1592 if (rcvd == budget)
1593 return rcvd;
1594
1595poll_exit:
1596	napi_complete(napi);
1597
1598 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1599
1600 bnad_enable_rx_irq(bnad, ccb);
1601 return rcvd;
1602}
1603
1604static void
1605bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1606{
1607 struct bnad_rx_ctrl *rx_ctrl;
1608 int i;
1609
1610 /* Initialize & enable NAPI */
1611 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1612 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1613
1614 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1615 bnad_napi_poll_rx, 64);
1616
1617 napi_enable(&rx_ctrl->napi);
1618 }
1619}
1620
1621static void
1622bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1623{
1624 int i;
1625
1626 /* First disable and then clean up */
1627 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1628 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1629 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1630 }
1631}
1632
1633/* Should be called with conf_lock held */
1634void
1635bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1636{
1637 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1638 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1639 unsigned long flags;
1640
1641 if (!tx_info->tx)
1642 return;
1643
1644 init_completion(&bnad->bnad_completions.tx_comp);
1645 spin_lock_irqsave(&bnad->bna_lock, flags);
1646 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1647 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1648 wait_for_completion(&bnad->bnad_completions.tx_comp);
1649
1650 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1651 bnad_tx_msix_unregister(bnad, tx_info,
1652 bnad->num_txq_per_tx);
1653
1654 spin_lock_irqsave(&bnad->bna_lock, flags);
1655 bna_tx_destroy(tx_info->tx);
1656 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1657
1658 tx_info->tx = NULL;
1659
1660 if (0 == tx_id)
1661 tasklet_kill(&bnad->tx_free_tasklet);
1662
1663 bnad_tx_res_free(bnad, res_info);
1664}
1665
1666/* Should be called with conf_lock held */
1667int
1668bnad_setup_tx(struct bnad *bnad, uint tx_id)
1669{
1670 int err;
1671 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1672 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1673 struct bna_intr_info *intr_info =
1674 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1675 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1676 struct bna_tx_event_cbfn tx_cbfn;
1677 struct bna_tx *tx;
1678 unsigned long flags;
1679
1680 /* Initialize the Tx object configuration */
1681 tx_config->num_txq = bnad->num_txq_per_tx;
1682 tx_config->txq_depth = bnad->txq_depth;
1683 tx_config->tx_type = BNA_TX_T_REGULAR;
1684
1685 /* Initialize the tx event handlers */
1686 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1687 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1688 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1689 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1690 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1691
1692 /* Get BNA's resource requirement for one tx object */
1693 spin_lock_irqsave(&bnad->bna_lock, flags);
1694 bna_tx_res_req(bnad->num_txq_per_tx,
1695 bnad->txq_depth, res_info);
1696 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1697
1698 /* Fill Unmap Q memory requirements */
1699 BNAD_FILL_UNMAPQ_MEM_REQ(
1700 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1701 bnad->num_txq_per_tx,
1702 BNAD_TX_UNMAPQ_DEPTH);
1703
1704 /* Allocate resources */
1705 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1706 if (err)
1707 return err;
1708
1709 /* Ask BNA to create one Tx object, supplying required resources */
1710 spin_lock_irqsave(&bnad->bna_lock, flags);
1711 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1712 tx_info);
1713 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1714 if (!tx)
1715 goto err_return;
1716 tx_info->tx = tx;
1717
1718 /* Register ISR for the Tx object */
1719 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1720 err = bnad_tx_msix_register(bnad, tx_info,
1721 tx_id, bnad->num_txq_per_tx);
1722 if (err)
1723 goto err_return;
1724 }
1725
1726 spin_lock_irqsave(&bnad->bna_lock, flags);
1727 bna_tx_enable(tx);
1728 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1729
1730 return 0;
1731
1732err_return:
1733 bnad_tx_res_free(bnad, res_info);
1734 return err;
1735}
1736
1737/* Setup the rx config for bna_rx_create */
1738/* bnad decides the configuration */
1739static void
1740bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1741{
1742 rx_config->rx_type = BNA_RX_T_REGULAR;
1743 rx_config->num_paths = bnad->num_rxp_per_rx;
1744
1745 if (bnad->num_rxp_per_rx > 1) {
1746 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1747 rx_config->rss_config.hash_type =
1748 (BFI_RSS_T_V4_TCP |
1749 BFI_RSS_T_V6_TCP |
1750 BFI_RSS_T_V4_IP |
1751 BFI_RSS_T_V6_IP);
1752 rx_config->rss_config.hash_mask =
1753 bnad->num_rxp_per_rx - 1;
1754 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1755 sizeof(rx_config->rss_config.toeplitz_hash_key));
1756 } else {
1757 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1758 memset(&rx_config->rss_config, 0,
1759 sizeof(rx_config->rss_config));
1760 }
1761 rx_config->rxp_type = BNA_RXP_SLR;
1762 rx_config->q_depth = bnad->rxq_depth;
1763
1764 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1765
1766 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1767}
1768
1769/* Called with mutex_lock(&bnad->conf_mutex) held */
1770void
1771bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1772{
1773 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1774 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1775 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1776 unsigned long flags;
1777 int dim_timer_del = 0;
1778
1779 if (!rx_info->rx)
1780 return;
1781
1782 if (0 == rx_id) {
1783 spin_lock_irqsave(&bnad->bna_lock, flags);
1784 dim_timer_del = bnad_dim_timer_running(bnad);
1785 if (dim_timer_del)
1786 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1787 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1788 if (dim_timer_del)
1789 del_timer_sync(&bnad->dim_timer);
1790 }
1791
1792 bnad_napi_disable(bnad, rx_id);
1793
1794 init_completion(&bnad->bnad_completions.rx_comp);
1795 spin_lock_irqsave(&bnad->bna_lock, flags);
1796 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1797 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1798 wait_for_completion(&bnad->bnad_completions.rx_comp);
1799
1800 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1801 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1802
1803 spin_lock_irqsave(&bnad->bna_lock, flags);
1804 bna_rx_destroy(rx_info->rx);
1805 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1806
1807 rx_info->rx = NULL;
1808
1809 bnad_rx_res_free(bnad, res_info);
1810}
1811
1812/* Called with mutex_lock(&bnad->conf_mutex) held */
1813int
1814bnad_setup_rx(struct bnad *bnad, uint rx_id)
1815{
1816 int err;
1817 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1818 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1819 struct bna_intr_info *intr_info =
1820 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1821 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1822 struct bna_rx_event_cbfn rx_cbfn;
1823 struct bna_rx *rx;
1824 unsigned long flags;
1825
1826 /* Initialize the Rx object configuration */
1827 bnad_init_rx_config(bnad, rx_config);
1828
1829 /* Initialize the Rx event handlers */
1830 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1831 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1832 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1833 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1834 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1835 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1836
1837 /* Get BNA's resource requirement for one Rx object */
1838 spin_lock_irqsave(&bnad->bna_lock, flags);
1839 bna_rx_res_req(rx_config, res_info);
1840 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1841
1842 /* Fill Unmap Q memory requirements */
1843 BNAD_FILL_UNMAPQ_MEM_REQ(
1844 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1845 rx_config->num_paths +
1846 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1847 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1848
1849 /* Allocate resource */
1850 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1851 if (err)
1852 return err;
1853
1854 /* Ask BNA to create one Rx object, supplying required resources */
1855 spin_lock_irqsave(&bnad->bna_lock, flags);
1856 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1857 rx_info);
1858 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1859 if (!rx)
1860 goto err_return;
1861 rx_info->rx = rx;
1862
1863 /* Register ISR for the Rx object */
1864 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1865 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1866 rx_config->num_paths);
1867 if (err)
1868 goto err_return;
1869 }
1870
1871 /* Enable NAPI */
1872 bnad_napi_enable(bnad, rx_id);
1873
1874 spin_lock_irqsave(&bnad->bna_lock, flags);
1875 if (0 == rx_id) {
1876 /* Set up Dynamic Interrupt Moderation Vector */
1877 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1878 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1879
1880 /* Enable VLAN filtering only on the default Rx */
1881 bna_rx_vlanfilter_enable(rx);
1882
1883 /* Start the DIM timer */
1884 bnad_dim_timer_start(bnad);
1885 }
1886
1887 bna_rx_enable(rx);
1888 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1889
1890 return 0;
1891
1892err_return:
1893 bnad_cleanup_rx(bnad, rx_id);
1894 return err;
1895}
1896
1897/* Called with conf_lock & bnad->bna_lock held */
1898void
1899bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1900{
1901 struct bnad_tx_info *tx_info;
1902
1903 tx_info = &bnad->tx_info[0];
1904 if (!tx_info->tx)
1905 return;
1906
1907 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1908}
1909
1910/* Called with conf_lock & bnad->bna_lock held */
1911void
1912bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1913{
1914 struct bnad_rx_info *rx_info;
1915 int i;
1916
1917 for (i = 0; i < bnad->num_rx; i++) {
1918 rx_info = &bnad->rx_info[i];
1919 if (!rx_info->rx)
1920 continue;
1921 bna_rx_coalescing_timeo_set(rx_info->rx,
1922 bnad->rx_coalescing_timeo);
1923 }
1924}
1925
1926/*
1927 * Called with bnad->bna_lock held
1928 */
1929static int
1930bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1931{
1932 int ret;
1933
1934 if (!is_valid_ether_addr(mac_addr))
1935 return -EADDRNOTAVAIL;
1936
1937 /* If datapath is down, pretend everything went through */
1938 if (!bnad->rx_info[0].rx)
1939 return 0;
1940
1941 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1942 if (ret != BNA_CB_SUCCESS)
1943 return -EADDRNOTAVAIL;
1944
1945 return 0;
1946}
1947
1948/* Should be called with conf_lock held */
1949static int
1950bnad_enable_default_bcast(struct bnad *bnad)
1951{
1952 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1953 int ret;
1954 unsigned long flags;
1955
1956 init_completion(&bnad->bnad_completions.mcast_comp);
1957
1958 spin_lock_irqsave(&bnad->bna_lock, flags);
1959 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1960 bnad_cb_rx_mcast_add);
1961 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1962
1963 if (ret == BNA_CB_SUCCESS)
1964 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1965 else
1966 return -ENODEV;
1967
1968 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1969 return -ENODEV;
1970
1971 return 0;
1972}
1973
1974/* Called with bnad_conf_lock() held */
1975static void
1976bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
1977{
1978 u16 vid;
1979 unsigned long flags;
1980
1981 BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
1982
1983 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
1984 spin_lock_irqsave(&bnad->bna_lock, flags);
1985 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
1986 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1987 }
1988}
1989
1990/* Statistics utilities */
1991void
1992bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1993{
1994 int i, j;
1995
1996 for (i = 0; i < bnad->num_rx; i++) {
1997 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1998 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1999 stats->rx_packets += bnad->rx_info[i].
2000 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2001 stats->rx_bytes += bnad->rx_info[i].
2002 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2003 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2004 bnad->rx_info[i].rx_ctrl[j].ccb->
2005 rcb[1]->rxq) {
2006 stats->rx_packets +=
2007 bnad->rx_info[i].rx_ctrl[j].
2008 ccb->rcb[1]->rxq->rx_packets;
2009 stats->rx_bytes +=
2010 bnad->rx_info[i].rx_ctrl[j].
2011 ccb->rcb[1]->rxq->rx_bytes;
2012 }
2013 }
2014 }
2015 }
2016 for (i = 0; i < bnad->num_tx; i++) {
2017 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2018 if (bnad->tx_info[i].tcb[j]) {
2019 stats->tx_packets +=
2020 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2021 stats->tx_bytes +=
2022 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2023 }
2024 }
2025 }
2026}
2027
2028/*
2029 * Must be called with the bna_lock held.
2030 */
2031void
2032bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2033{
2034 struct bfi_ll_stats_mac *mac_stats;
2035 u64 bmap;
2036 int i;
2037
2038 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2039 stats->rx_errors =
2040 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2041 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2042 mac_stats->rx_undersize;
2043 stats->tx_errors = mac_stats->tx_fcs_error +
2044 mac_stats->tx_undersize;
2045 stats->rx_dropped = mac_stats->rx_drop;
2046 stats->tx_dropped = mac_stats->tx_drop;
2047 stats->multicast = mac_stats->rx_multicast;
2048 stats->collisions = mac_stats->tx_total_collision;
2049
2050 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2051
2052 /* receive ring buffer overflow ?? */
2053
2054 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2055 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2056	/* receiver FIFO overrun */
2057 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2058 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2059 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2060 if (bmap & 1) {
2061 stats->rx_fifo_errors +=
2062 bnad->stats.bna_stats->
2063 hw_stats->rxf_stats[i].frame_drops;
2064 break;
2065 }
2066 bmap >>= 1;
2067 }
2068}
2069
2070static void
2071bnad_mbox_irq_sync(struct bnad *bnad)
2072{
2073 u32 irq;
2074 unsigned long flags;
2075
2076 spin_lock_irqsave(&bnad->bna_lock, flags);
2077 if (bnad->cfg_flags & BNAD_CF_MSIX)
2078 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2079 else
2080 irq = bnad->pcidev->irq;
2081 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2082
2083 synchronize_irq(irq);
2084}
2085
2086/* Utility used by bnad_start_xmit, for doing TSO */
2087static int
2088bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2089{
2090 int err;
2091
2092	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2093 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2094 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2095 if (skb_header_cloned(skb)) {
2096 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2097 if (err) {
2098 BNAD_UPDATE_CTR(bnad, tso_err);
2099 return err;
2100 }
2101 }
2102
2103 /*
2104 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2105 * excluding the length field.
2106 */
2107 if (skb->protocol == htons(ETH_P_IP)) {
2108 struct iphdr *iph = ip_hdr(skb);
2109
2110 /* Do we really need these? */
2111 iph->tot_len = 0;
2112 iph->check = 0;
2113
2114 tcp_hdr(skb)->check =
2115 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2116 IPPROTO_TCP, 0);
2117 BNAD_UPDATE_CTR(bnad, tso4);
2118 } else {
2119 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2120
2121 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2122 ipv6h->payload_len = 0;
2123 tcp_hdr(skb)->check =
2124 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2125 IPPROTO_TCP, 0);
2126 BNAD_UPDATE_CTR(bnad, tso6);
2127 }
2128
2129 return 0;
2130}
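/*
 * Illustrative note (general TSO convention, not verified against this
 * hardware): the ~csum_tcpudp_magic()/~csum_ipv6_magic() seed above is
 * the complement of the pseudo-header sum computed with a zero length,
 * which lets the NIC add each segment's TCP length and payload sum and
 * fold the result into that segment's checksum field.
 */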
2131
2132/*
2133 * Initialize Q numbers depending on Rx Paths
2134 * Called with bnad->bna_lock held, because of cfg_flags
2135 * access.
2136 */
2137static void
2138bnad_q_num_init(struct bnad *bnad)
2139{
2140 int rxps;
2141
2142 rxps = min((uint)num_online_cpus(),
2143 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2144
2145 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2146 rxps = 1; /* INTx */
2147
2148 bnad->num_rx = 1;
2149 bnad->num_tx = 1;
2150 bnad->num_rxp_per_rx = rxps;
2151 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2152}
2153
2154/*
2155 * Adjusts the Q numbers, given a number of MSI-X vectors.
2156 * Preference is given to RSS over Tx priority queues; in that
2157 * case only one TxQ is used.
2158 * Called with bnad->bna_lock held because of cfg_flags access
2159 */
2160static void
2161bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2162{
2163 bnad->num_txq_per_tx = 1;
2164 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2165 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2166 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2167 bnad->num_rxp_per_rx = msix_vectors -
2168 (bnad->num_tx * bnad->num_txq_per_tx) -
2169 BNAD_MAILBOX_MSIX_VECTORS;
2170 } else
2171 bnad->num_rxp_per_rx = 1;
2172}
2173
2174/* Enable / disable device */
2175static void
2176bnad_device_disable(struct bnad *bnad)
2177{
2178 unsigned long flags;
2179
2180 init_completion(&bnad->bnad_completions.ioc_comp);
2181
2182 spin_lock_irqsave(&bnad->bna_lock, flags);
2183 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2184 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2185
2186 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2187}
2188
2189static int
2190bnad_device_enable(struct bnad *bnad)
2191{
2192 int err = 0;
2193 unsigned long flags;
2194
2195 init_completion(&bnad->bnad_completions.ioc_comp);
2196
2197 spin_lock_irqsave(&bnad->bna_lock, flags);
2198 bna_device_enable(&bnad->bna.device);
2199 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2200
2201 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2202
2203 if (bnad->bnad_completions.ioc_comp_status)
2204 err = bnad->bnad_completions.ioc_comp_status;
2205
2206 return err;
2207}
2208
2209/* Free BNA resources */
2210static void
2211bnad_res_free(struct bnad *bnad)
2212{
2213 int i;
2214 struct bna_res_info *res_info = &bnad->res_info[0];
2215
2216 for (i = 0; i < BNA_RES_T_MAX; i++) {
2217 if (res_info[i].res_type == BNA_RES_T_MEM)
2218 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2219 else
2220 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2221 }
2222}
2223
2224/* Allocates memory and interrupt resources for BNA */
2225static int
2226bnad_res_alloc(struct bnad *bnad)
2227{
2228 int i, err;
2229 struct bna_res_info *res_info = &bnad->res_info[0];
2230
2231 for (i = 0; i < BNA_RES_T_MAX; i++) {
2232 if (res_info[i].res_type == BNA_RES_T_MEM)
2233 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2234 else
2235 err = bnad_mbox_irq_alloc(bnad,
2236 &res_info[i].res_u.intr_info);
2237 if (err)
2238 goto err_return;
2239 }
2240 return 0;
2241
2242err_return:
2243 bnad_res_free(bnad);
2244 return err;
2245}
2246
2247/* Interrupt enable / disable */
2248static void
2249bnad_enable_msix(struct bnad *bnad)
2250{
2251 int i, ret;
2252 unsigned long flags;
2253
2254 spin_lock_irqsave(&bnad->bna_lock, flags);
2255 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2256 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2257 return;
2258 }
2259 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2260
2261 if (bnad->msix_table)
2262 return;
2263
2264 bnad->msix_table =
2265 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2266
2267 if (!bnad->msix_table)
2268 goto intx_mode;
2269
2270 for (i = 0; i < bnad->msix_num; i++)
2271 bnad->msix_table[i].entry = i;
2272
2273 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2274 if (ret > 0) {
2275 /* Not enough MSI-X vectors. */
2276
2277 spin_lock_irqsave(&bnad->bna_lock, flags);
2278 /* ret = #of vectors that we got */
2279 bnad_q_num_adjust(bnad, ret);
2280 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2281
2282 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2283 + (bnad->num_rx
2284 * bnad->num_rxp_per_rx) +
2285 BNAD_MAILBOX_MSIX_VECTORS;
2286
2287 /* Try once more with adjusted numbers */
2288 /* If this fails, fall back to INTx */
2289 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2290 bnad->msix_num);
2291 if (ret)
2292 goto intx_mode;
2293
2294 } else if (ret < 0)
2295 goto intx_mode;
2296 return;
2297
2298intx_mode:
2299
2300 kfree(bnad->msix_table);
2301 bnad->msix_table = NULL;
2302 bnad->msix_num = 0;
2303 spin_lock_irqsave(&bnad->bna_lock, flags);
2304 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2305 bnad_q_num_init(bnad);
2306 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2307}
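/*
 * Worked example (assumed defaults, for illustration only): with 8
 * online CPUs the driver requests 1 TxQ + 8 Rx paths + 1 mailbox = 10
 * vectors.  If pci_enable_msix() grants only 6, bnad_q_num_adjust()
 * shrinks the Rx paths to 6 - 1 - 1 = 4, msix_num becomes 1 + 4 + 1 =
 * 6 and the allocation is retried once; a second failure falls back to
 * INTx mode.
 */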
2308
2309static void
2310bnad_disable_msix(struct bnad *bnad)
2311{
2312 u32 cfg_flags;
2313 unsigned long flags;
2314
2315 spin_lock_irqsave(&bnad->bna_lock, flags);
2316 cfg_flags = bnad->cfg_flags;
2317 if (bnad->cfg_flags & BNAD_CF_MSIX)
2318 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2319 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2320
2321 if (cfg_flags & BNAD_CF_MSIX) {
2322 pci_disable_msix(bnad->pcidev);
2323 kfree(bnad->msix_table);
2324 bnad->msix_table = NULL;
2325 }
2326}
2327
2328/* Netdev entry points */
2329static int
2330bnad_open(struct net_device *netdev)
2331{
2332 int err;
2333 struct bnad *bnad = netdev_priv(netdev);
2334 struct bna_pause_config pause_config;
2335 int mtu;
2336 unsigned long flags;
2337
2338 mutex_lock(&bnad->conf_mutex);
2339
2340 /* Tx */
2341 err = bnad_setup_tx(bnad, 0);
2342 if (err)
2343 goto err_return;
2344
2345 /* Rx */
2346 err = bnad_setup_rx(bnad, 0);
2347 if (err)
2348 goto cleanup_tx;
2349
2350 /* Port */
2351 pause_config.tx_pause = 0;
2352 pause_config.rx_pause = 0;
2353
2354 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2355
2356 spin_lock_irqsave(&bnad->bna_lock, flags);
2357 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2358 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2359 bna_port_enable(&bnad->bna.port);
2360 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2361
2362 /* Enable broadcast */
2363 bnad_enable_default_bcast(bnad);
2364
2365 /* Restore VLANs, if any */
2366 bnad_restore_vlans(bnad, 0);
2367
2368 /* Set the UCAST address */
2369 spin_lock_irqsave(&bnad->bna_lock, flags);
2370 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2371 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2372
2373 /* Start the stats timer */
2374 bnad_stats_timer_start(bnad);
2375
2376 mutex_unlock(&bnad->conf_mutex);
2377
2378 return 0;
2379
2380cleanup_tx:
2381 bnad_cleanup_tx(bnad, 0);
2382
2383err_return:
2384 mutex_unlock(&bnad->conf_mutex);
2385 return err;
2386}
2387
2388static int
2389bnad_stop(struct net_device *netdev)
2390{
2391 struct bnad *bnad = netdev_priv(netdev);
2392 unsigned long flags;
2393
2394 mutex_lock(&bnad->conf_mutex);
2395
2396 /* Stop the stats timer */
2397 bnad_stats_timer_stop(bnad);
2398
2399 init_completion(&bnad->bnad_completions.port_comp);
2400
2401 spin_lock_irqsave(&bnad->bna_lock, flags);
2402 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2403 bnad_cb_port_disabled);
2404 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2405
2406 wait_for_completion(&bnad->bnad_completions.port_comp);
2407
2408 bnad_cleanup_tx(bnad, 0);
2409 bnad_cleanup_rx(bnad, 0);
2410
2411 /* Synchronize mailbox IRQ */
2412 bnad_mbox_irq_sync(bnad);
2413
2414 mutex_unlock(&bnad->conf_mutex);
2415
2416 return 0;
2417}
2418
2419/* TX */
2420/*
2421 * bnad_start_xmit : Netdev entry point for Transmit
2422 * Called under lock held by net_device
2423 */
2424static netdev_tx_t
2425bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2426{
2427 struct bnad *bnad = netdev_priv(netdev);
2428
2429 u16 txq_prod, vlan_tag = 0;
2430 u32 unmap_prod, wis, wis_used, wi_range;
2431 u32 vectors, vect_id, i, acked;
2432 u32 tx_id;
2433 int err;
2434
2435 struct bnad_tx_info *tx_info;
2436 struct bna_tcb *tcb;
2437 struct bnad_unmap_q *unmap_q;
2438 dma_addr_t dma_addr;
2439 struct bna_txq_entry *txqent;
2440 bna_txq_wi_ctrl_flag_t flags;
2441
2442 if (unlikely
2443 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2444 dev_kfree_skb(skb);
2445 return NETDEV_TX_OK;
2446 }
2447
2448 tx_id = 0;
2449
2450 tx_info = &bnad->tx_info[tx_id];
2451 tcb = tx_info->tcb[tx_id];
2452 unmap_q = tcb->unmap_q;
2453
2454 /*
2455 * Takes care of the Tx that is scheduled between clearing the flag
2456 * and the netif_stop_queue() call.
2457 */
2458 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2459 dev_kfree_skb(skb);
2460 return NETDEV_TX_OK;
2461 }
2462
2463 vectors = 1 + skb_shinfo(skb)->nr_frags;
2464 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2465 dev_kfree_skb(skb);
2466 return NETDEV_TX_OK;
2467 }
2468 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2469 acked = 0;
2470 if (unlikely
2471 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2472 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2473 if ((u16) (*tcb->hw_consumer_index) !=
2474 tcb->consumer_index &&
2475 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2476 acked = bnad_free_txbufs(bnad, tcb);
2477 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2478 bna_ib_ack(tcb->i_dbell, acked);
2479 smp_mb__before_clear_bit();
2480 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2481 } else {
2482 netif_stop_queue(netdev);
2483 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2484 }
2485
2486 smp_mb();
2487 /*
2488 * Check again to deal with race condition between
2489 * netif_stop_queue here, and netif_wake_queue in
2490 * interrupt handler which is not inside netif tx lock.
2491 */
2492 if (likely
2493 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2494 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2495 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2496 return NETDEV_TX_BUSY;
2497 } else {
2498 netif_wake_queue(netdev);
2499 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2500 }
2501 }
2502
2503 unmap_prod = unmap_q->producer_index;
2504 wis_used = 1;
2505 vect_id = 0;
2506 flags = 0;
2507
2508 txq_prod = tcb->producer_index;
2509 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2510 BUG_ON(!(wi_range <= tcb->q_depth));
2511 txqent->hdr.wi.reserved = 0;
2512 txqent->hdr.wi.num_vectors = vectors;
2513 txqent->hdr.wi.opcode =
2514 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2515 BNA_TXQ_WI_SEND));
2516
2517 if (vlan_tx_tag_present(skb)) {
2518 vlan_tag = (u16) vlan_tx_tag_get(skb);
2519 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2520 }
2521 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2522 vlan_tag =
2523 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2524 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2525 }
2526
2527 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2528
2529 if (skb_is_gso(skb)) {
2530 err = bnad_tso_prepare(bnad, skb);
2531 if (err) {
2532 dev_kfree_skb(skb);
2533 return NETDEV_TX_OK;
2534 }
2535 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2536 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2537 txqent->hdr.wi.l4_hdr_size_n_offset =
2538 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2539 (tcp_hdrlen(skb) >> 2,
2540 skb_transport_offset(skb)));
2541 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2542 u8 proto = 0;
2543
2544 txqent->hdr.wi.lso_mss = 0;
2545
2546 if (skb->protocol == htons(ETH_P_IP))
2547 proto = ip_hdr(skb)->protocol;
2548 else if (skb->protocol == htons(ETH_P_IPV6)) {
2549 /* nexthdr may not be TCP immediately. */
2550 proto = ipv6_hdr(skb)->nexthdr;
2551 }
2552 if (proto == IPPROTO_TCP) {
2553 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2554 txqent->hdr.wi.l4_hdr_size_n_offset =
2555 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2556 (0, skb_transport_offset(skb)));
2557
2558 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2559
2560 BUG_ON(!(skb_headlen(skb) >=
2561 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2562
2563 } else if (proto == IPPROTO_UDP) {
2564 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2565 txqent->hdr.wi.l4_hdr_size_n_offset =
2566 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2567 (0, skb_transport_offset(skb)));
2568
2569 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2570
2571 BUG_ON(!(skb_headlen(skb) >=
2572 skb_transport_offset(skb) +
2573 sizeof(struct udphdr)));
2574 } else {
2575 err = skb_checksum_help(skb);
2576 BNAD_UPDATE_CTR(bnad, csum_help);
2577 if (err) {
2578 dev_kfree_skb(skb);
2579 BNAD_UPDATE_CTR(bnad, csum_help_err);
2580 return NETDEV_TX_OK;
2581 }
2582 }
2583 } else {
2584 txqent->hdr.wi.lso_mss = 0;
2585 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2586 }
2587
2588 txqent->hdr.wi.flags = htons(flags);
2589
2590 txqent->hdr.wi.frame_length = htonl(skb->len);
2591
2592 unmap_q->unmap_array[unmap_prod].skb = skb;
2593 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2594 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2595 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2596 skb_headlen(skb), DMA_TO_DEVICE);
2597 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2598 dma_addr);
2599
2600 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2601 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2602
2603 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2604 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2605 u32 size = frag->size;
2606
2607 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2608 vect_id = 0;
2609 if (--wi_range)
2610 txqent++;
2611 else {
2612 BNA_QE_INDX_ADD(txq_prod, wis_used,
2613 tcb->q_depth);
2614 wis_used = 0;
2615 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2616 txqent, wi_range);
2617 BUG_ON(!(wi_range <= tcb->q_depth));
2618 }
2619 wis_used++;
2620 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2621 }
2622
2623 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2624 txqent->vector[vect_id].length = htons(size);
2625 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2626 frag->page_offset, size, DMA_TO_DEVICE);
2627 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2628 dma_addr);
2629 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2630 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2631 }
2632
2633 unmap_q->producer_index = unmap_prod;
2634 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2635 tcb->producer_index = txq_prod;
2636
2637 smp_mb();
2638
2639 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2640 return NETDEV_TX_OK;
2641
2642 bna_txq_prod_indx_doorbell(tcb);
2643
2644 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2645 tasklet_schedule(&bnad->tx_free_tasklet);
2646
2647 return NETDEV_TX_OK;
2648}
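/*
 * Illustration of the work-item layout used above: a WI carries up to
 * BFI_TX_MAX_VECTORS_PER_WI (4) scatter/gather vectors, so an skb with
 * a linear head plus 4 page fragments (5 vectors) needs
 * BNA_TXQ_WI_NEEDED(5) = 2 queue entries: the first WI carries the
 * packet header fields and four data vectors (head + three fragments),
 * the second is flagged BNA_TXQ_WI_EXTENSION and carries the last
 * fragment.
 */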
2649
2650/*
2651 * Used spin_lock to synchronize reading of stats structures, which
2652 * is written by BNA under the same lock.
2653 */
2654static struct rtnl_link_stats64 *
2655bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2656{
2657 struct bnad *bnad = netdev_priv(netdev);
2658 unsigned long flags;
2659
2660 spin_lock_irqsave(&bnad->bna_lock, flags);
2661
2662 bnad_netdev_qstats_fill(bnad, stats);
2663 bnad_netdev_hwstats_fill(bnad, stats);
2664
2665 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2666
2667 return stats;
2668}
2669
2670static void
2671bnad_set_rx_mode(struct net_device *netdev)
2672{
2673 struct bnad *bnad = netdev_priv(netdev);
2674 u32 new_mask, valid_mask;
2675 unsigned long flags;
2676
2677 spin_lock_irqsave(&bnad->bna_lock, flags);
2678
2679 new_mask = valid_mask = 0;
2680
2681 if (netdev->flags & IFF_PROMISC) {
2682 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2683 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2684 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2685 bnad->cfg_flags |= BNAD_CF_PROMISC;
2686 }
2687 } else {
2688 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2689 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2690 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2691 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2692 }
2693 }
2694
2695 if (netdev->flags & IFF_ALLMULTI) {
2696 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2697 new_mask |= BNA_RXMODE_ALLMULTI;
2698 valid_mask |= BNA_RXMODE_ALLMULTI;
2699 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2700 }
2701 } else {
2702 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2703 new_mask &= ~BNA_RXMODE_ALLMULTI;
2704 valid_mask |= BNA_RXMODE_ALLMULTI;
2705 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2706 }
2707 }
2708
2709 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2710
2711 if (!netdev_mc_empty(netdev)) {
2712 u8 *mcaddr_list;
2713 int mc_count = netdev_mc_count(netdev);
2714
2715 /* Index 0 holds the broadcast address */
2716 mcaddr_list =
2717 kzalloc((mc_count + 1) * ETH_ALEN,
2718 GFP_ATOMIC);
2719 if (!mcaddr_list)
2720 goto unlock;
2721
2722 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2723
2724 /* Copy rest of the MC addresses */
2725 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2726
2727 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2728 mcaddr_list, NULL);
2729
2730 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2731 kfree(mcaddr_list);
2732 }
2733unlock:
2734 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2735}
2736
2737/*
2738 * bna_lock is used to sync writes to netdev->addr
2739 * conf_lock cannot be used since this call may be made
2740 * in a non-blocking context.
2741 */
2742static int
2743bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2744{
2745 int err;
2746 struct bnad *bnad = netdev_priv(netdev);
2747 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2748 unsigned long flags;
2749
2750 spin_lock_irqsave(&bnad->bna_lock, flags);
2751
2752 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2753
2754 if (!err)
2755 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2756
2757 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2758
2759 return err;
2760}
2761
2762static int
2763bnad_change_mtu(struct net_device *netdev, int new_mtu)
2764{
2765 int mtu, err = 0;
2766 unsigned long flags;
2767
2768 struct bnad *bnad = netdev_priv(netdev);
2769
2770 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2771 return -EINVAL;
2772
2773 mutex_lock(&bnad->conf_mutex);
2774
2775 netdev->mtu = new_mtu;
2776
2777 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2778
2779 spin_lock_irqsave(&bnad->bna_lock, flags);
2780 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2781 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2782
2783 mutex_unlock(&bnad->conf_mutex);
2784 return err;
2785}
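/*
 * Example of the frame-size math above: with a standard 1500-byte MTU
 * the port is programmed for ETH_HLEN + 1500 + ETH_FCS_LEN =
 * 14 + 1500 + 4 = 1518 bytes.  The lower bound on new_mtu follows from
 * new_mtu + ETH_HLEN >= ETH_ZLEN, i.e. at least 60 - 14 = 46, and the
 * upper bound is BNAD_JUMBO_MTU (9000).
 */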
2786
2787static void
2788bnad_vlan_rx_add_vid(struct net_device *netdev,
2789 unsigned short vid)
2790{
2791 struct bnad *bnad = netdev_priv(netdev);
2792 unsigned long flags;
2793
2794 if (!bnad->rx_info[0].rx)
2795 return;
2796
2797 mutex_lock(&bnad->conf_mutex);
2798
2799 spin_lock_irqsave(&bnad->bna_lock, flags);
2800 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2801 set_bit(vid, bnad->active_vlans);
2802 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2803
2804 mutex_unlock(&bnad->conf_mutex);
2805}
2806
2807static void
2808bnad_vlan_rx_kill_vid(struct net_device *netdev,
2809 unsigned short vid)
2810{
2811 struct bnad *bnad = netdev_priv(netdev);
2812 unsigned long flags;
2813
2814 if (!bnad->rx_info[0].rx)
2815 return;
2816
2817 mutex_lock(&bnad->conf_mutex);
2818
2819 spin_lock_irqsave(&bnad->bna_lock, flags);
2820 clear_bit(vid, bnad->active_vlans);
2821 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2822 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2823
2824 mutex_unlock(&bnad->conf_mutex);
2825}
2826
2827#ifdef CONFIG_NET_POLL_CONTROLLER
2828static void
2829bnad_netpoll(struct net_device *netdev)
2830{
2831 struct bnad *bnad = netdev_priv(netdev);
2832 struct bnad_rx_info *rx_info;
2833 struct bnad_rx_ctrl *rx_ctrl;
2834 u32 curr_mask;
2835 int i, j;
2836
2837 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2838 bna_intx_disable(&bnad->bna, curr_mask);
2839 bnad_isr(bnad->pcidev->irq, netdev);
2840 bna_intx_enable(&bnad->bna, curr_mask);
2841 } else {
2842 for (i = 0; i < bnad->num_rx; i++) {
2843 rx_info = &bnad->rx_info[i];
2844 if (!rx_info->rx)
2845 continue;
2846 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2847 rx_ctrl = &rx_info->rx_ctrl[j];
2848 if (rx_ctrl->ccb) {
2849 bnad_disable_rx_irq(bnad,
2850 rx_ctrl->ccb);
2851 bnad_netif_rx_schedule_poll(bnad,
2852 rx_ctrl->ccb);
2853 }
2854 }
2855 }
2856 }
2857}
2858#endif
2859
2860static const struct net_device_ops bnad_netdev_ops = {
2861 .ndo_open = bnad_open,
2862 .ndo_stop = bnad_stop,
2863 .ndo_start_xmit = bnad_start_xmit,
2864 .ndo_get_stats64 = bnad_get_stats64,
2865 .ndo_set_rx_mode = bnad_set_rx_mode,
2866 .ndo_set_multicast_list = bnad_set_rx_mode,
2867 .ndo_validate_addr = eth_validate_addr,
2868 .ndo_set_mac_address = bnad_set_mac_address,
2869 .ndo_change_mtu = bnad_change_mtu,
2870 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2871 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2872#ifdef CONFIG_NET_POLL_CONTROLLER
2873 .ndo_poll_controller = bnad_netpoll
2874#endif
2875};
2876
2877static void
2878bnad_netdev_init(struct bnad *bnad, bool using_dac)
2879{
2880 struct net_device *netdev = bnad->netdev;
2881
2882 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2883 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2884 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
2885
2886 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
2887 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2888 NETIF_F_TSO | NETIF_F_TSO6;
2889
2890 netdev->features |= netdev->hw_features |
2891 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2892
2893 if (using_dac)
2894 netdev->features |= NETIF_F_HIGHDMA;
2895
2896 netdev->mem_start = bnad->mmio_start;
2897 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2898
2899 netdev->netdev_ops = &bnad_netdev_ops;
2900 bnad_set_ethtool_ops(netdev);
2901}
2902
2903/*
2904 * 1. Initialize the bnad structure
2905 * 2. Setup netdev pointer in pci_dev
2906 * 3. Initialize Tx free tasklet
2907 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2908 */
2909static int
2910bnad_init(struct bnad *bnad,
2911 struct pci_dev *pdev, struct net_device *netdev)
2912{
2913 unsigned long flags;
2914
2915 SET_NETDEV_DEV(netdev, &pdev->dev);
2916 pci_set_drvdata(pdev, netdev);
2917
2918 bnad->netdev = netdev;
2919 bnad->pcidev = pdev;
2920 bnad->mmio_start = pci_resource_start(pdev, 0);
2921 bnad->mmio_len = pci_resource_len(pdev, 0);
2922 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2923 if (!bnad->bar0) {
2924 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2925 pci_set_drvdata(pdev, NULL);
2926 return -ENOMEM;
2927 }
2928 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2929 (unsigned long long) bnad->mmio_len);
2930
2931 spin_lock_irqsave(&bnad->bna_lock, flags);
2932 if (!bnad_msix_disable)
2933 bnad->cfg_flags = BNAD_CF_MSIX;
2934
2935 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2936
2937 bnad_q_num_init(bnad);
2938 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2939
2940 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2941 (bnad->num_rx * bnad->num_rxp_per_rx) +
2942 BNAD_MAILBOX_MSIX_VECTORS;
2943
2944 bnad->txq_depth = BNAD_TXQ_DEPTH;
2945 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2946
2947 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2948 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2949
2950 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2951 (unsigned long)bnad);
2952
2953 return 0;
2954}
2955
2956/*
2957 * Must be called after bnad_pci_uninit()
2958 * so that iounmap() and pci_set_drvdata(NULL)
2959 * happen only after PCI uninitialization.
2960 */
2961static void
2962bnad_uninit(struct bnad *bnad)
2963{
2964 if (bnad->bar0)
2965 iounmap(bnad->bar0);
2966 pci_set_drvdata(bnad->pcidev, NULL);
2967}
2968
2969/*
2970 * Initialize locks
2971 a) Per-device mutex used for serializing configuration
2972 changes from the OS interface
2973 b) spin lock used to protect bna state machine
2974 */
2975static void
2976bnad_lock_init(struct bnad *bnad)
2977{
2978 spin_lock_init(&bnad->bna_lock);
2979 mutex_init(&bnad->conf_mutex);
2980}
2981
2982static void
2983bnad_lock_uninit(struct bnad *bnad)
2984{
2985 mutex_destroy(&bnad->conf_mutex);
2986}
2987
2988/* PCI Initialization */
2989static int
2990bnad_pci_init(struct bnad *bnad,
2991 struct pci_dev *pdev, bool *using_dac)
2992{
2993 int err;
2994
2995 err = pci_enable_device(pdev);
2996 if (err)
2997 return err;
2998 err = pci_request_regions(pdev, BNAD_NAME);
2999 if (err)
3000 goto disable_device;
3001 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3002 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3003 *using_dac = 1;
3004 } else {
3005 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3006 if (err) {
3007 err = dma_set_coherent_mask(&pdev->dev,
3008 DMA_BIT_MASK(32));
3009 if (err)
3010 goto release_regions;
3011 }
3012 *using_dac = 0;
3013 }
3014 pci_set_master(pdev);
3015 return 0;
3016
3017release_regions:
3018 pci_release_regions(pdev);
3019disable_device:
3020 pci_disable_device(pdev);
3021
3022 return err;
3023}
3024
3025static void
3026bnad_pci_uninit(struct pci_dev *pdev)
3027{
3028 pci_release_regions(pdev);
3029 pci_disable_device(pdev);
3030}
3031
3032static int __devinit
3033bnad_pci_probe(struct pci_dev *pdev,
3034 const struct pci_device_id *pcidev_id)
3035{
3036 bool using_dac = false;
3037 int err;
3038 struct bnad *bnad;
3039 struct bna *bna;
3040 struct net_device *netdev;
3041 struct bfa_pcidev pcidev_info;
3042 unsigned long flags;
3043
3044 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3045 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3046
3047 mutex_lock(&bnad_fwimg_mutex);
3048 if (!cna_get_firmware_buf(pdev)) {
3049 mutex_unlock(&bnad_fwimg_mutex);
3050 pr_warn("Failed to load Firmware Image!\n");
3051 return -ENODEV;
3052 }
3053 mutex_unlock(&bnad_fwimg_mutex);
3054
3055 /*
3056	 * Allocates a net_device with sizeof(struct bnad) of private area;
3057	 * bnad = netdev_priv(netdev)
3058 */
3059 netdev = alloc_etherdev(sizeof(struct bnad));
3060 if (!netdev) {
3061 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3062 err = -ENOMEM;
3063 return err;
3064 }
3065 bnad = netdev_priv(netdev);
3066
3067 /*
3068 * PCI initialization
3069 * Output : using_dac = 1 for 64 bit DMA
3070 * = 0 for 32 bit DMA
3071 */
3072 err = bnad_pci_init(bnad, pdev, &using_dac);
3073 if (err)
3074 goto free_netdev;
3075
3076 bnad_lock_init(bnad);
3077 /*
3078 * Initialize bnad structure
3079 * Setup relation between pci_dev & netdev
3080 * Init Tx free tasklet
3081 */
3082 err = bnad_init(bnad, pdev, netdev);
3083 if (err)
3084 goto pci_uninit;
3085 /* Initialize netdev structure, set up ethtool ops */
3086 bnad_netdev_init(bnad, using_dac);
3087
3088 /* Set link to down state */
3089 netif_carrier_off(netdev);
3090
3091 bnad_enable_msix(bnad);
3092
3093	/* Get resource requirement from bna */
3094 bna_res_req(&bnad->res_info[0]);
3095
3096 /* Allocate resources from bna */
3097 err = bnad_res_alloc(bnad);
3098 if (err)
3099 goto free_netdev;
3100
3101 bna = &bnad->bna;
3102
3103 /* Setup pcidev_info for bna_init() */
3104 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3105 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3106 pcidev_info.device_id = bnad->pcidev->device;
3107 pcidev_info.pci_bar_kva = bnad->bar0;
3108
3109 mutex_lock(&bnad->conf_mutex);
3110
3111 spin_lock_irqsave(&bnad->bna_lock, flags);
3112 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3113 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3114
3115 bnad->stats.bna_stats = &bna->stats;
3116
3117 /* Set up timers */
3118 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3119 ((unsigned long)bnad));
3120 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3121 ((unsigned long)bnad));
3122 setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
3123 ((unsigned long)bnad));
3124 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
3125 ((unsigned long)bnad));
3126
3127 /* Now start the timer before calling IOC */
3128 mod_timer(&bnad->bna.device.ioc.iocpf_timer,
3129 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3130
3131 /*
3132 * Start the chip
3133 * Don't care even if err != 0, bna state machine will
3134 * deal with it
3135 */
3136 err = bnad_device_enable(bnad);
3137
3138 /* Get the burnt-in mac */
3139 spin_lock_irqsave(&bnad->bna_lock, flags);
3140 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3141 bnad_set_netdev_perm_addr(bnad);
3142 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3143
3144 mutex_unlock(&bnad->conf_mutex);
3145
3146	/* Finally, register with net_device layer */
3147 err = register_netdev(netdev);
3148 if (err) {
3149 pr_err("BNA : Registering with netdev failed\n");
3150 goto disable_device;
3151 }
3152
3153 return 0;
3154
3155disable_device:
3156 mutex_lock(&bnad->conf_mutex);
3157 bnad_device_disable(bnad);
3158 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3159 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3160 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3161 spin_lock_irqsave(&bnad->bna_lock, flags);
3162 bna_uninit(bna);
3163 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3164 mutex_unlock(&bnad->conf_mutex);
3165
3166 bnad_res_free(bnad);
3167 bnad_disable_msix(bnad);
3168pci_uninit:
3169 bnad_pci_uninit(pdev);
3170 bnad_lock_uninit(bnad);
3171 bnad_uninit(bnad);
3172free_netdev:
3173 free_netdev(netdev);
3174 return err;
3175}
3176
3177static void __devexit
3178bnad_pci_remove(struct pci_dev *pdev)
3179{
3180 struct net_device *netdev = pci_get_drvdata(pdev);
3181 struct bnad *bnad;
3182 struct bna *bna;
3183 unsigned long flags;
3184
3185 if (!netdev)
3186 return;
3187
3188 pr_info("%s bnad_pci_remove\n", netdev->name);
3189 bnad = netdev_priv(netdev);
3190 bna = &bnad->bna;
3191
3192 unregister_netdev(netdev);
3193
3194 mutex_lock(&bnad->conf_mutex);
3195 bnad_device_disable(bnad);
3196 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3197 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3198 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3199 spin_lock_irqsave(&bnad->bna_lock, flags);
3200 bna_uninit(bna);
3201 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3202 mutex_unlock(&bnad->conf_mutex);
3203
3204 bnad_res_free(bnad);
3205 bnad_disable_msix(bnad);
3206 bnad_pci_uninit(pdev);
3207 bnad_lock_uninit(bnad);
3208 bnad_uninit(bnad);
3209 free_netdev(netdev);
3210}
3211
3212static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3213 {
3214 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3215 PCI_DEVICE_ID_BROCADE_CT),
3216 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3217 .class_mask = 0xffff00
3218 }, {0, }
3219};
3220
3221MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3222
3223static struct pci_driver bnad_pci_driver = {
3224 .name = BNAD_NAME,
3225 .id_table = bnad_pci_id_table,
3226 .probe = bnad_pci_probe,
3227 .remove = __devexit_p(bnad_pci_remove),
3228};
3229
3230static int __init
3231bnad_module_init(void)
3232{
3233 int err;
3234
3235 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3236 BNAD_VERSION);
3237
3238 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3239
3240 err = pci_register_driver(&bnad_pci_driver);
3241 if (err < 0) {
3242 pr_err("bna : PCI registration failed in module init "
3243 "(%d)\n", err);
3244 return err;
3245 }
3246
3247 return 0;
3248}
3249
3250static void __exit
3251bnad_module_exit(void)
3252{
3253 pci_unregister_driver(&bnad_pci_driver);
3254
3255 if (bfi_fw)
3256 release_firmware(bfi_fw);
3257}
3258
3259module_init(bnad_module_init);
3260module_exit(bnad_module_exit);
3261
3262MODULE_AUTHOR("Brocade");
3263MODULE_LICENSE("GPL");
3264MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3265MODULE_VERSION(BNAD_VERSION);
3266MODULE_FIRMWARE(CNA_FW_FILE_CT);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
new file mode 100644
index 000000000000..458eb30371b5
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -0,0 +1,341 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BNAD_H__
19#define __BNAD_H__
20
21#include <linux/rtnetlink.h>
22#include <linux/workqueue.h>
23#include <linux/ipv6.h>
24#include <linux/etherdevice.h>
25#include <linux/mutex.h>
26#include <linux/firmware.h>
27#include <linux/if_vlan.h>
28
29/* Fix for IA64 */
30#include <asm/checksum.h>
31#include <net/ip6_checksum.h>
32
33#include <net/ip.h>
34#include <net/tcp.h>
35
36#include "bna.h"
37
38#define BNAD_TXQ_DEPTH 2048
39#define BNAD_RXQ_DEPTH 2048
40
41#define BNAD_MAX_TXS 1
42#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
43#define BNAD_TXQ_NUM 1
44
45#define BNAD_MAX_RXS 1
46#define BNAD_MAX_RXPS_PER_RX 16
47
48/*
49 * Control structure, pointed to by ccb->ctrl, which
50 * determines the NAPI / LRO behavior of the CCB.
51 * There is a 1:1 correspondence between ccb & ctrl.
52 */
53struct bnad_rx_ctrl {
54 struct bna_ccb *ccb;
55 unsigned long flags;
56 struct napi_struct napi;
57};
58
59#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
60
61#define BNAD_GET_TX_ID(_skb) (0)
62
63/*
64 * GLOBAL #defines (CONSTANTS)
65 */
66#define BNAD_NAME "bna"
67#define BNAD_NAME_LEN 64
68
69#define BNAD_VERSION "2.3.2.3"
70
71#define BNAD_MAILBOX_MSIX_INDEX 0
72#define BNAD_MAILBOX_MSIX_VECTORS 1
73#define BNAD_INTX_TX_IB_BITMASK 0x1
74#define BNAD_INTX_RX_IB_BITMASK 0x2
75
76#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
77#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
78
79#define BNAD_MAX_Q_DEPTH 0x10000
80#define BNAD_MIN_Q_DEPTH 0x200
81
82#define BNAD_JUMBO_MTU 9000
83
84#define BNAD_NETIF_WAKE_THRESHOLD 8
85
86#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3
87
88/* Bit positions for tcb->flags */
89#define BNAD_TXQ_FREE_SENT 0
90#define BNAD_TXQ_TX_STARTED 1
91
92/* Bit positions for rcb->flags */
93#define BNAD_RXQ_REFILL 0
94#define BNAD_RXQ_STARTED 1
95
96/*
97 * DATA STRUCTURES
98 */
99
100/* enums */
101enum bnad_intr_source {
102 BNAD_INTR_TX = 1,
103 BNAD_INTR_RX = 2
104};
105
106enum bnad_link_state {
107 BNAD_LS_DOWN = 0,
108 BNAD_LS_UP = 1
109};
110
111struct bnad_completion {
112 struct completion ioc_comp;
113 struct completion ucast_comp;
114 struct completion mcast_comp;
115 struct completion tx_comp;
116 struct completion rx_comp;
117 struct completion stats_comp;
118 struct completion port_comp;
119
120 u8 ioc_comp_status;
121 u8 ucast_comp_status;
122 u8 mcast_comp_status;
123 u8 tx_comp_status;
124 u8 rx_comp_status;
125 u8 stats_comp_status;
126 u8 port_comp_status;
127};
128
129/* Tx Rx Control Stats */
130struct bnad_drv_stats {
131 u64 netif_queue_stop;
132 u64 netif_queue_wakeup;
133 u64 netif_queue_stopped;
134 u64 tso4;
135 u64 tso6;
136 u64 tso_err;
137 u64 tcpcsum_offload;
138 u64 udpcsum_offload;
139 u64 csum_help;
140 u64 csum_help_err;
141
142 u64 hw_stats_updates;
143 u64 netif_rx_schedule;
144 u64 netif_rx_complete;
145 u64 netif_rx_dropped;
146
147 u64 link_toggle;
148 u64 cee_up;
149
150 u64 rxp_info_alloc_failed;
151 u64 mbox_intr_disabled;
152 u64 mbox_intr_enabled;
153 u64 tx_unmap_q_alloc_failed;
154 u64 rx_unmap_q_alloc_failed;
155
156 u64 rxbuf_alloc_failed;
157};
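
/*
 * These driver-level counters are exported through ethtool; their names
 * are listed in bnad_net_stats_strings[] in bnad_ethtool.c.
 */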
158
159/* Complete driver stats */
160struct bnad_stats {
161 struct bnad_drv_stats drv_stats;
162 struct bna_stats *bna_stats;
163};
164
165/* Tx / Rx Resources */
166struct bnad_tx_res_info {
167 struct bna_res_info res_info[BNA_TX_RES_T_MAX];
168};
169
170struct bnad_rx_res_info {
171 struct bna_res_info res_info[BNA_RX_RES_T_MAX];
172};
173
174struct bnad_tx_info {
175 struct bna_tx *tx; /* 1:1 between tx_info & tx */
176 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
177} ____cacheline_aligned;
178
179struct bnad_rx_info {
180 struct bna_rx *rx; /* 1:1 between rx_info & rx */
181
182 struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
183} ____cacheline_aligned;
184
185/* Unmap queues for Tx / Rx cleanup */
186struct bnad_skb_unmap {
187 struct sk_buff *skb;
188 DEFINE_DMA_UNMAP_ADDR(dma_addr);
189};
190
191struct bnad_unmap_q {
192 u32 producer_index;
193 u32 consumer_index;
194 u32 q_depth;
195 /* This should be the last one */
196 struct bnad_skb_unmap unmap_array[1];
197};
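
/*
 * Note: unmap_array[] is a variable-length trailing member; the
 * structure is allocated with room for q_depth entries, indexed by
 * producer_index/consumer_index above.
 */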
198
199/* Bit mask values for bnad->cfg_flags */
200#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
201#define BNAD_CF_PROMISC 0x02
202#define BNAD_CF_ALLMULTI 0x04
203#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
204
205/* Defines for run_flags bit-mask */
206/* Set, tested & cleared using xxx_bit() functions */
207/* Values indicate bit positions */
208#define BNAD_RF_CEE_RUNNING 1
209#define BNAD_RF_MBOX_IRQ_DISABLED 2
210#define BNAD_RF_RX_STARTED 3
211#define BNAD_RF_DIM_TIMER_RUNNING 4
212#define BNAD_RF_STATS_TIMER_RUNNING 5
213#define BNAD_RF_TX_SHUTDOWN_DELAYED 6
214#define BNAD_RF_RX_SHUTDOWN_DELAYED 7
215
216struct bnad {
217 struct net_device *netdev;
218
219 /* Data path */
220 struct bnad_tx_info tx_info[BNAD_MAX_TXS];
221 struct bnad_rx_info rx_info[BNAD_MAX_RXS];
222
223 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
224 /*
225 * These q numbers are global only because
226 * they are used to calculate MSIx vectors.
227	 * The actual number of queues is maintained per
228	 * Tx/Rx object.
229 */
230 u32 num_tx;
231 u32 num_rx;
232 u32 num_txq_per_tx;
233 u32 num_rxp_per_rx;
234
235 u32 txq_depth;
236 u32 rxq_depth;
237
238 u8 tx_coalescing_timeo;
239 u8 rx_coalescing_timeo;
240
241 struct bna_rx_config rx_config[BNAD_MAX_RXS];
242 struct bna_tx_config tx_config[BNAD_MAX_TXS];
243
244 void __iomem *bar0; /* BAR0 address */
245
246 struct bna bna;
247
248 u32 cfg_flags;
249 unsigned long run_flags;
250
251 struct pci_dev *pcidev;
252 u64 mmio_start;
253 u64 mmio_len;
254
255 u32 msix_num;
256 struct msix_entry *msix_table;
257
258 struct mutex conf_mutex;
259 spinlock_t bna_lock ____cacheline_aligned;
260
261 /* Timers */
262 struct timer_list ioc_timer;
263 struct timer_list dim_timer;
264 struct timer_list stats_timer;
265
266 /* Control path resources, memory & irq */
267 struct bna_res_info res_info[BNA_RES_T_MAX];
268 struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
269 struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
270
271 struct bnad_completion bnad_completions;
272
273 /* Burnt in MAC address */
274 mac_t perm_addr;
275
276 struct tasklet_struct tx_free_tasklet;
277
278 /* Statistics */
279 struct bnad_stats stats;
280
281 struct bnad_diag *diag;
282
283 char adapter_name[BNAD_NAME_LEN];
284 char port_name[BNAD_NAME_LEN];
285 char mbox_irq_name[BNAD_NAME_LEN];
286};
287
288/*
289 * EXTERN VARIABLES
290 */
291extern struct firmware *bfi_fw;
292extern u32 bnad_rxqs_per_cq;
293
294/*
295 * EXTERN PROTOTYPES
296 */
297extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
298/* Netdev entry point prototypes */
299extern void bnad_set_ethtool_ops(struct net_device *netdev);
300
301/* Configuration & setup */
302extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
303extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
304
305extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
306extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
307extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
308extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
309
310/* Timer start/stop protos */
311extern void bnad_dim_timer_start(struct bnad *bnad);
312
313/* Statistics */
314extern void bnad_netdev_qstats_fill(struct bnad *bnad,
315 struct rtnl_link_stats64 *stats);
316extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
317 struct rtnl_link_stats64 *stats);
318
319/*
320 * MACROS
321 */
322/* To set & get the stats counters */
323#define BNAD_UPDATE_CTR(_bnad, _ctr) \
324 (((_bnad)->stats.drv_stats._ctr)++)
325
326#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
327
328#define bnad_enable_rx_irq_unsafe(_ccb) \
329{ \
330	if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
331 bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
332 (_ccb)->rx_coalescing_timeo); \
333 bna_ib_ack((_ccb)->i_dbell, 0); \
334 } \
335}
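
/*
 * "unsafe" here means that the caller is expected to already hold
 * bnad->bna_lock when invoking this macro.
 */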
336
337#define bnad_dim_timer_running(_bnad) \
338 (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
339 (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
340
341#endif /* __BNAD_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
new file mode 100644
index 000000000000..49174f87f4d1
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -0,0 +1,895 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "cna.h"
20
21#include <linux/netdevice.h>
22#include <linux/skbuff.h>
23#include <linux/ethtool.h>
24#include <linux/rtnetlink.h>
25
26#include "bna.h"
27
28#include "bnad.h"
29
30#define BNAD_NUM_TXF_COUNTERS 12
31#define BNAD_NUM_RXF_COUNTERS 10
32#define BNAD_NUM_CQ_COUNTERS 3
33#define BNAD_NUM_RXQ_COUNTERS 6
34#define BNAD_NUM_TXQ_COUNTERS 5
35
36#define BNAD_ETHTOOL_STATS_NUM \
37 (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
38 sizeof(struct bnad_drv_stats) / sizeof(u64) + \
39 offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
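
/*
 * BNAD_ETHTOOL_STATS_NUM covers only the fixed-size part of the stats:
 * the rtnl_link_stats64 fields, the bnad_drv_stats counters and the
 * hardware counters that precede the per-rxf stats in bfi_ll_stats.
 * Per-TxF/RxF and per-queue counters are added at run time by
 * bnad_get_stats_count_locked().
 */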
40
41static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
42 "rx_packets",
43 "tx_packets",
44 "rx_bytes",
45 "tx_bytes",
46 "rx_errors",
47 "tx_errors",
48 "rx_dropped",
49 "tx_dropped",
50 "multicast",
51 "collisions",
52
53 "rx_length_errors",
54 "rx_over_errors",
55 "rx_crc_errors",
56 "rx_frame_errors",
57 "rx_fifo_errors",
58 "rx_missed_errors",
59
60 "tx_aborted_errors",
61 "tx_carrier_errors",
62 "tx_fifo_errors",
63 "tx_heartbeat_errors",
64 "tx_window_errors",
65
66 "rx_compressed",
67 "tx_compressed",
68
69 "netif_queue_stop",
70 "netif_queue_wakeup",
71 "netif_queue_stopped",
72 "tso4",
73 "tso6",
74 "tso_err",
75 "tcpcsum_offload",
76 "udpcsum_offload",
77 "csum_help",
78 "csum_help_err",
79 "hw_stats_updates",
80 "netif_rx_schedule",
81 "netif_rx_complete",
82 "netif_rx_dropped",
83
84 "link_toggle",
85 "cee_up",
86
87 "rxp_info_alloc_failed",
88 "mbox_intr_disabled",
89 "mbox_intr_enabled",
90 "tx_unmap_q_alloc_failed",
91 "rx_unmap_q_alloc_failed",
92 "rxbuf_alloc_failed",
93
94 "mac_frame_64",
95 "mac_frame_65_127",
96 "mac_frame_128_255",
97 "mac_frame_256_511",
98 "mac_frame_512_1023",
99 "mac_frame_1024_1518",
100 "mac_frame_1518_1522",
101 "mac_rx_bytes",
102 "mac_rx_packets",
103 "mac_rx_fcs_error",
104 "mac_rx_multicast",
105 "mac_rx_broadcast",
106 "mac_rx_control_frames",
107 "mac_rx_pause",
108 "mac_rx_unknown_opcode",
109 "mac_rx_alignment_error",
110 "mac_rx_frame_length_error",
111 "mac_rx_code_error",
112 "mac_rx_carrier_sense_error",
113 "mac_rx_undersize",
114 "mac_rx_oversize",
115 "mac_rx_fragments",
116 "mac_rx_jabber",
117 "mac_rx_drop",
118
119 "mac_tx_bytes",
120 "mac_tx_packets",
121 "mac_tx_multicast",
122 "mac_tx_broadcast",
123 "mac_tx_pause",
124 "mac_tx_deferral",
125 "mac_tx_excessive_deferral",
126 "mac_tx_single_collision",
127 "mac_tx_muliple_collision",
128 "mac_tx_late_collision",
129 "mac_tx_excessive_collision",
130 "mac_tx_total_collision",
131 "mac_tx_pause_honored",
132 "mac_tx_drop",
133 "mac_tx_jabber",
134 "mac_tx_fcs_error",
135 "mac_tx_control_frame",
136 "mac_tx_oversize",
137 "mac_tx_undersize",
138 "mac_tx_fragments",
139
140 "bpc_tx_pause_0",
141 "bpc_tx_pause_1",
142 "bpc_tx_pause_2",
143 "bpc_tx_pause_3",
144 "bpc_tx_pause_4",
145 "bpc_tx_pause_5",
146 "bpc_tx_pause_6",
147 "bpc_tx_pause_7",
148 "bpc_tx_zero_pause_0",
149 "bpc_tx_zero_pause_1",
150 "bpc_tx_zero_pause_2",
151 "bpc_tx_zero_pause_3",
152 "bpc_tx_zero_pause_4",
153 "bpc_tx_zero_pause_5",
154 "bpc_tx_zero_pause_6",
155 "bpc_tx_zero_pause_7",
156 "bpc_tx_first_pause_0",
157 "bpc_tx_first_pause_1",
158 "bpc_tx_first_pause_2",
159 "bpc_tx_first_pause_3",
160 "bpc_tx_first_pause_4",
161 "bpc_tx_first_pause_5",
162 "bpc_tx_first_pause_6",
163 "bpc_tx_first_pause_7",
164
165 "bpc_rx_pause_0",
166 "bpc_rx_pause_1",
167 "bpc_rx_pause_2",
168 "bpc_rx_pause_3",
169 "bpc_rx_pause_4",
170 "bpc_rx_pause_5",
171 "bpc_rx_pause_6",
172 "bpc_rx_pause_7",
173 "bpc_rx_zero_pause_0",
174 "bpc_rx_zero_pause_1",
175 "bpc_rx_zero_pause_2",
176 "bpc_rx_zero_pause_3",
177 "bpc_rx_zero_pause_4",
178 "bpc_rx_zero_pause_5",
179 "bpc_rx_zero_pause_6",
180 "bpc_rx_zero_pause_7",
181 "bpc_rx_first_pause_0",
182 "bpc_rx_first_pause_1",
183 "bpc_rx_first_pause_2",
184 "bpc_rx_first_pause_3",
185 "bpc_rx_first_pause_4",
186 "bpc_rx_first_pause_5",
187 "bpc_rx_first_pause_6",
188 "bpc_rx_first_pause_7",
189
190 "rad_rx_frames",
191 "rad_rx_octets",
192 "rad_rx_vlan_frames",
193 "rad_rx_ucast",
194 "rad_rx_ucast_octets",
195 "rad_rx_ucast_vlan",
196 "rad_rx_mcast",
197 "rad_rx_mcast_octets",
198 "rad_rx_mcast_vlan",
199 "rad_rx_bcast",
200 "rad_rx_bcast_octets",
201 "rad_rx_bcast_vlan",
202 "rad_rx_drops",
203
204 "fc_rx_ucast_octets",
205 "fc_rx_ucast",
206 "fc_rx_ucast_vlan",
207 "fc_rx_mcast_octets",
208 "fc_rx_mcast",
209 "fc_rx_mcast_vlan",
210 "fc_rx_bcast_octets",
211 "fc_rx_bcast",
212 "fc_rx_bcast_vlan",
213
214 "fc_tx_ucast_octets",
215 "fc_tx_ucast",
216 "fc_tx_ucast_vlan",
217 "fc_tx_mcast_octets",
218 "fc_tx_mcast",
219 "fc_tx_mcast_vlan",
220 "fc_tx_bcast_octets",
221 "fc_tx_bcast",
222 "fc_tx_bcast_vlan",
223 "fc_tx_parity_errors",
224 "fc_tx_timeout",
225 "fc_tx_fid_parity_errors",
226};
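
/*
 * The order of the strings above must match the order in which
 * bnad_get_ethtool_stats() fills the stats buffer.
 */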
227
228static int
229bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
230{
231 cmd->supported = SUPPORTED_10000baseT_Full;
232 cmd->advertising = ADVERTISED_10000baseT_Full;
233 cmd->autoneg = AUTONEG_DISABLE;
234 cmd->supported |= SUPPORTED_FIBRE;
235 cmd->advertising |= ADVERTISED_FIBRE;
236 cmd->port = PORT_FIBRE;
237 cmd->phy_address = 0;
238
239 if (netif_carrier_ok(netdev)) {
240 ethtool_cmd_speed_set(cmd, SPEED_10000);
241 cmd->duplex = DUPLEX_FULL;
242 } else {
243 ethtool_cmd_speed_set(cmd, -1);
244 cmd->duplex = -1;
245 }
246 cmd->transceiver = XCVR_EXTERNAL;
247 cmd->maxtxpkt = 0;
248 cmd->maxrxpkt = 0;
249
250 return 0;
251}
252
253static int
254bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
255{
256 /* 10G full duplex setting supported only */
257 if (cmd->autoneg == AUTONEG_ENABLE)
258		return -EOPNOTSUPP;
259
260	if (ethtool_cmd_speed(cmd) == SPEED_10000 &&
261	    cmd->duplex == DUPLEX_FULL)
262		return 0;
263
264 return -EOPNOTSUPP;
265}
266
267static void
268bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
269{
270 struct bnad *bnad = netdev_priv(netdev);
271 struct bfa_ioc_attr *ioc_attr;
272 unsigned long flags;
273
274 strcpy(drvinfo->driver, BNAD_NAME);
275 strcpy(drvinfo->version, BNAD_VERSION);
276
277 ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
278 if (ioc_attr) {
279 spin_lock_irqsave(&bnad->bna_lock, flags);
280 bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
281 spin_unlock_irqrestore(&bnad->bna_lock, flags);
282
283 strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
284 sizeof(drvinfo->fw_version) - 1);
285 kfree(ioc_attr);
286 }
287
288 strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
289}
290
291static void
292bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
293{
294 wolinfo->supported = 0;
295 wolinfo->wolopts = 0;
296}
297
298static int
299bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
300{
301 struct bnad *bnad = netdev_priv(netdev);
302 unsigned long flags;
303
304	/* bna_lock is required to access bnad->cfg_flags */
305 spin_lock_irqsave(&bnad->bna_lock, flags);
306 coalesce->use_adaptive_rx_coalesce =
307 (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
308 spin_unlock_irqrestore(&bnad->bna_lock, flags);
309
310 coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
311 BFI_COALESCING_TIMER_UNIT;
312 coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
313 BFI_COALESCING_TIMER_UNIT;
314 coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
315
316 return 0;
317}
318
319static int
320bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
321{
322 struct bnad *bnad = netdev_priv(netdev);
323 unsigned long flags;
324 int dim_timer_del = 0;
325
326 if (coalesce->rx_coalesce_usecs == 0 ||
327 coalesce->rx_coalesce_usecs >
328 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
329 return -EINVAL;
330
331 if (coalesce->tx_coalesce_usecs == 0 ||
332 coalesce->tx_coalesce_usecs >
333 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
334 return -EINVAL;
335
336 mutex_lock(&bnad->conf_mutex);
337 /*
338	 * No need to store rx_coalesce_usecs here: whenever DIM
339	 * is disabled, the value can be obtained from the stack
340	 * again.
341 */
342 spin_lock_irqsave(&bnad->bna_lock, flags);
343 if (coalesce->use_adaptive_rx_coalesce) {
344 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
345 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
346 bnad_dim_timer_start(bnad);
347 }
348 } else {
349 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
350 bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
351 dim_timer_del = bnad_dim_timer_running(bnad);
352 if (dim_timer_del) {
353 clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
354 &bnad->run_flags);
355 spin_unlock_irqrestore(&bnad->bna_lock, flags);
356 del_timer_sync(&bnad->dim_timer);
357 spin_lock_irqsave(&bnad->bna_lock, flags);
358 }
359 bnad_rx_coalescing_timeo_set(bnad);
360 }
361 }
362 if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
363 BFI_COALESCING_TIMER_UNIT) {
364 bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
365 BFI_COALESCING_TIMER_UNIT;
366 bnad_tx_coalescing_timeo_set(bnad);
367 }
368
369 if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
370 BFI_COALESCING_TIMER_UNIT) {
371 bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
372 BFI_COALESCING_TIMER_UNIT;
373
374 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
375 bnad_rx_coalescing_timeo_set(bnad);
376
377 }
378
379 /* Add Tx Inter-pkt DMA count? */
380
381 spin_unlock_irqrestore(&bnad->bna_lock, flags);
382
383 mutex_unlock(&bnad->conf_mutex);
384 return 0;
385}
386
387static void
388bnad_get_ringparam(struct net_device *netdev,
389 struct ethtool_ringparam *ringparam)
390{
391 struct bnad *bnad = netdev_priv(netdev);
392
393 ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
394 ringparam->rx_mini_max_pending = 0;
395 ringparam->rx_jumbo_max_pending = 0;
396 ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
397
398 ringparam->rx_pending = bnad->rxq_depth;
399	ringparam->rx_mini_pending = 0;
400	ringparam->rx_jumbo_pending = 0;
401 ringparam->tx_pending = bnad->txq_depth;
402}
403
404static int
405bnad_set_ringparam(struct net_device *netdev,
406 struct ethtool_ringparam *ringparam)
407{
408 int i, current_err, err = 0;
409 struct bnad *bnad = netdev_priv(netdev);
410
411 mutex_lock(&bnad->conf_mutex);
412 if (ringparam->rx_pending == bnad->rxq_depth &&
413 ringparam->tx_pending == bnad->txq_depth) {
414 mutex_unlock(&bnad->conf_mutex);
415 return 0;
416 }
417
418 if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
419 ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
420 !BNA_POWER_OF_2(ringparam->rx_pending)) {
421 mutex_unlock(&bnad->conf_mutex);
422 return -EINVAL;
423 }
424 if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
425 ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
426 !BNA_POWER_OF_2(ringparam->tx_pending)) {
427 mutex_unlock(&bnad->conf_mutex);
428 return -EINVAL;
429 }
430
431 if (ringparam->rx_pending != bnad->rxq_depth) {
432 bnad->rxq_depth = ringparam->rx_pending;
433 for (i = 0; i < bnad->num_rx; i++) {
434 if (!bnad->rx_info[i].rx)
435 continue;
436 bnad_cleanup_rx(bnad, i);
437 current_err = bnad_setup_rx(bnad, i);
438 if (current_err && !err)
439 err = current_err;
440 }
441 }
442 if (ringparam->tx_pending != bnad->txq_depth) {
443 bnad->txq_depth = ringparam->tx_pending;
444 for (i = 0; i < bnad->num_tx; i++) {
445 if (!bnad->tx_info[i].tx)
446 continue;
447 bnad_cleanup_tx(bnad, i);
448 current_err = bnad_setup_tx(bnad, i);
449 if (current_err && !err)
450 err = current_err;
451 }
452 }
453
454 mutex_unlock(&bnad->conf_mutex);
455 return err;
456}
457
458static void
459bnad_get_pauseparam(struct net_device *netdev,
460 struct ethtool_pauseparam *pauseparam)
461{
462 struct bnad *bnad = netdev_priv(netdev);
463
464 pauseparam->autoneg = 0;
465 pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
466 pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
467}
468
469static int
470bnad_set_pauseparam(struct net_device *netdev,
471 struct ethtool_pauseparam *pauseparam)
472{
473 struct bnad *bnad = netdev_priv(netdev);
474 struct bna_pause_config pause_config;
475 unsigned long flags;
476
477 if (pauseparam->autoneg == AUTONEG_ENABLE)
478 return -EINVAL;
479
480 mutex_lock(&bnad->conf_mutex);
481 if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
482 pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
483 pause_config.rx_pause = pauseparam->rx_pause;
484 pause_config.tx_pause = pauseparam->tx_pause;
485 spin_lock_irqsave(&bnad->bna_lock, flags);
486 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
487 spin_unlock_irqrestore(&bnad->bna_lock, flags);
488 }
489 mutex_unlock(&bnad->conf_mutex);
490 return 0;
491}
492
493static void
494bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
495{
496 struct bnad *bnad = netdev_priv(netdev);
497 int i, j, q_num;
498 u64 bmap;
499
500 mutex_lock(&bnad->conf_mutex);
501
502 switch (stringset) {
503 case ETH_SS_STATS:
504 for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
505			BUG_ON(strlen(bnad_net_stats_strings[i]) >=
506			       ETH_GSTRING_LEN);
507 memcpy(string, bnad_net_stats_strings[i],
508 ETH_GSTRING_LEN);
509 string += ETH_GSTRING_LEN;
510 }
511 bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
512 ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
513 for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
514 if (bmap & 1) {
515 sprintf(string, "txf%d_ucast_octets", i);
516 string += ETH_GSTRING_LEN;
517 sprintf(string, "txf%d_ucast", i);
518 string += ETH_GSTRING_LEN;
519 sprintf(string, "txf%d_ucast_vlan", i);
520 string += ETH_GSTRING_LEN;
521 sprintf(string, "txf%d_mcast_octets", i);
522 string += ETH_GSTRING_LEN;
523 sprintf(string, "txf%d_mcast", i);
524 string += ETH_GSTRING_LEN;
525 sprintf(string, "txf%d_mcast_vlan", i);
526 string += ETH_GSTRING_LEN;
527 sprintf(string, "txf%d_bcast_octets", i);
528 string += ETH_GSTRING_LEN;
529 sprintf(string, "txf%d_bcast", i);
530 string += ETH_GSTRING_LEN;
531 sprintf(string, "txf%d_bcast_vlan", i);
532 string += ETH_GSTRING_LEN;
533 sprintf(string, "txf%d_errors", i);
534 string += ETH_GSTRING_LEN;
535 sprintf(string, "txf%d_filter_vlan", i);
536 string += ETH_GSTRING_LEN;
537 sprintf(string, "txf%d_filter_mac_sa", i);
538 string += ETH_GSTRING_LEN;
539 }
540 bmap >>= 1;
541 }
542
543 bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
544 ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
545 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
546 if (bmap & 1) {
547 sprintf(string, "rxf%d_ucast_octets", i);
548 string += ETH_GSTRING_LEN;
549 sprintf(string, "rxf%d_ucast", i);
550 string += ETH_GSTRING_LEN;
551 sprintf(string, "rxf%d_ucast_vlan", i);
552 string += ETH_GSTRING_LEN;
553 sprintf(string, "rxf%d_mcast_octets", i);
554 string += ETH_GSTRING_LEN;
555 sprintf(string, "rxf%d_mcast", i);
556 string += ETH_GSTRING_LEN;
557 sprintf(string, "rxf%d_mcast_vlan", i);
558 string += ETH_GSTRING_LEN;
559 sprintf(string, "rxf%d_bcast_octets", i);
560 string += ETH_GSTRING_LEN;
561 sprintf(string, "rxf%d_bcast", i);
562 string += ETH_GSTRING_LEN;
563 sprintf(string, "rxf%d_bcast_vlan", i);
564 string += ETH_GSTRING_LEN;
565 sprintf(string, "rxf%d_frame_drops", i);
566 string += ETH_GSTRING_LEN;
567 }
568 bmap >>= 1;
569 }
570
571 q_num = 0;
572 for (i = 0; i < bnad->num_rx; i++) {
573 if (!bnad->rx_info[i].rx)
574 continue;
575 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
576 sprintf(string, "cq%d_producer_index", q_num);
577 string += ETH_GSTRING_LEN;
578 sprintf(string, "cq%d_consumer_index", q_num);
579 string += ETH_GSTRING_LEN;
580 sprintf(string, "cq%d_hw_producer_index",
581 q_num);
582 string += ETH_GSTRING_LEN;
583 q_num++;
584 }
585 }
586
587 q_num = 0;
588 for (i = 0; i < bnad->num_rx; i++) {
589 if (!bnad->rx_info[i].rx)
590 continue;
591 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
592 sprintf(string, "rxq%d_packets", q_num);
593 string += ETH_GSTRING_LEN;
594 sprintf(string, "rxq%d_bytes", q_num);
595 string += ETH_GSTRING_LEN;
596 sprintf(string, "rxq%d_packets_with_error",
597 q_num);
598 string += ETH_GSTRING_LEN;
599 sprintf(string, "rxq%d_allocbuf_failed", q_num);
600 string += ETH_GSTRING_LEN;
601 sprintf(string, "rxq%d_producer_index", q_num);
602 string += ETH_GSTRING_LEN;
603 sprintf(string, "rxq%d_consumer_index", q_num);
604 string += ETH_GSTRING_LEN;
605 q_num++;
606 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
607 bnad->rx_info[i].rx_ctrl[j].ccb->
608 rcb[1] &&
609 bnad->rx_info[i].rx_ctrl[j].ccb->
610 rcb[1]->rxq) {
611 sprintf(string, "rxq%d_packets", q_num);
612 string += ETH_GSTRING_LEN;
613 sprintf(string, "rxq%d_bytes", q_num);
614 string += ETH_GSTRING_LEN;
615 sprintf(string,
616 "rxq%d_packets_with_error", q_num);
617 string += ETH_GSTRING_LEN;
618 sprintf(string, "rxq%d_allocbuf_failed",
619 q_num);
620 string += ETH_GSTRING_LEN;
621 sprintf(string, "rxq%d_producer_index",
622 q_num);
623 string += ETH_GSTRING_LEN;
624 sprintf(string, "rxq%d_consumer_index",
625 q_num);
626 string += ETH_GSTRING_LEN;
627 q_num++;
628 }
629 }
630 }
631
632 q_num = 0;
633 for (i = 0; i < bnad->num_tx; i++) {
634 if (!bnad->tx_info[i].tx)
635 continue;
636 for (j = 0; j < bnad->num_txq_per_tx; j++) {
637 sprintf(string, "txq%d_packets", q_num);
638 string += ETH_GSTRING_LEN;
639 sprintf(string, "txq%d_bytes", q_num);
640 string += ETH_GSTRING_LEN;
641 sprintf(string, "txq%d_producer_index", q_num);
642 string += ETH_GSTRING_LEN;
643 sprintf(string, "txq%d_consumer_index", q_num);
644 string += ETH_GSTRING_LEN;
645 sprintf(string, "txq%d_hw_consumer_index",
646 q_num);
647 string += ETH_GSTRING_LEN;
648 q_num++;
649 }
650 }
651
652 break;
653
654 default:
655 break;
656 }
657
658 mutex_unlock(&bnad->conf_mutex);
659}
660
661static int
662bnad_get_stats_count_locked(struct net_device *netdev)
663{
664 struct bnad *bnad = netdev_priv(netdev);
665 int i, j, count, rxf_active_num = 0, txf_active_num = 0;
666 u64 bmap;
667
668 bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
669 ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
670 for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
671 if (bmap & 1)
672 txf_active_num++;
673 bmap >>= 1;
674 }
675 bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
676 ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
677 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
678 if (bmap & 1)
679 rxf_active_num++;
680 bmap >>= 1;
681 }
682 count = BNAD_ETHTOOL_STATS_NUM +
683 txf_active_num * BNAD_NUM_TXF_COUNTERS +
684 rxf_active_num * BNAD_NUM_RXF_COUNTERS;
685
686 for (i = 0; i < bnad->num_rx; i++) {
687 if (!bnad->rx_info[i].rx)
688 continue;
689 count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
690 count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
691 for (j = 0; j < bnad->num_rxp_per_rx; j++)
692 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
693 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
694 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
695 count += BNAD_NUM_RXQ_COUNTERS;
696 }
697
698 for (i = 0; i < bnad->num_tx; i++) {
699 if (!bnad->tx_info[i].tx)
700 continue;
701 count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
702 }
703 return count;
704}
705
706static int
707bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
708{
709 int i, j;
710 struct bna_rcb *rcb = NULL;
711 struct bna_tcb *tcb = NULL;
712
713 for (i = 0; i < bnad->num_rx; i++) {
714 if (!bnad->rx_info[i].rx)
715 continue;
716 for (j = 0; j < bnad->num_rxp_per_rx; j++)
717 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
718 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
719 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
720 buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
721 ccb->producer_index;
722 buf[bi++] = 0; /* ccb->consumer_index */
723 buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
724 ccb->hw_producer_index);
725 }
726 }
727 for (i = 0; i < bnad->num_rx; i++) {
728 if (!bnad->rx_info[i].rx)
729 continue;
730 for (j = 0; j < bnad->num_rxp_per_rx; j++)
731 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
732 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
733 bnad->rx_info[i].rx_ctrl[j].ccb->
734 rcb[0]->rxq) {
735 rcb = bnad->rx_info[i].rx_ctrl[j].
736 ccb->rcb[0];
737 buf[bi++] = rcb->rxq->rx_packets;
738 buf[bi++] = rcb->rxq->rx_bytes;
739 buf[bi++] = rcb->rxq->
740 rx_packets_with_error;
741 buf[bi++] = rcb->rxq->
742 rxbuf_alloc_failed;
743 buf[bi++] = rcb->producer_index;
744 buf[bi++] = rcb->consumer_index;
745 }
746 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
747 bnad->rx_info[i].rx_ctrl[j].ccb->
748 rcb[1]->rxq) {
749 rcb = bnad->rx_info[i].rx_ctrl[j].
750 ccb->rcb[1];
751 buf[bi++] = rcb->rxq->rx_packets;
752 buf[bi++] = rcb->rxq->rx_bytes;
753 buf[bi++] = rcb->rxq->
754 rx_packets_with_error;
755 buf[bi++] = rcb->rxq->
756 rxbuf_alloc_failed;
757 buf[bi++] = rcb->producer_index;
758 buf[bi++] = rcb->consumer_index;
759 }
760 }
761 }
762
763 for (i = 0; i < bnad->num_tx; i++) {
764 if (!bnad->tx_info[i].tx)
765 continue;
766 for (j = 0; j < bnad->num_txq_per_tx; j++)
767 if (bnad->tx_info[i].tcb[j] &&
768 bnad->tx_info[i].tcb[j]->txq) {
769 tcb = bnad->tx_info[i].tcb[j];
770 buf[bi++] = tcb->txq->tx_packets;
771 buf[bi++] = tcb->txq->tx_bytes;
772 buf[bi++] = tcb->producer_index;
773 buf[bi++] = tcb->consumer_index;
774 buf[bi++] = *(tcb->hw_consumer_index);
775 }
776 }
777
778 return bi;
779}
780
781static void
782bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
783 u64 *buf)
784{
785 struct bnad *bnad = netdev_priv(netdev);
786 int i, j, bi;
787 unsigned long flags;
788 struct rtnl_link_stats64 *net_stats64;
789 u64 *stats64;
790 u64 bmap;
791
792 mutex_lock(&bnad->conf_mutex);
793 if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
794 mutex_unlock(&bnad->conf_mutex);
795 return;
796 }
797
798 /*
799	 * Use bna_lock to synchronize reads of bna_stats, which is
800	 * written under the same lock.
801 */
802 spin_lock_irqsave(&bnad->bna_lock, flags);
803 bi = 0;
804 memset(buf, 0, stats->n_stats * sizeof(u64));
805
806 net_stats64 = (struct rtnl_link_stats64 *)buf;
807 bnad_netdev_qstats_fill(bnad, net_stats64);
808 bnad_netdev_hwstats_fill(bnad, net_stats64);
809
810 bi = sizeof(*net_stats64) / sizeof(u64);
811
812 /* Get netif_queue_stopped from stack */
813 bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
814
815 /* Fill driver stats into ethtool buffers */
816 stats64 = (u64 *)&bnad->stats.drv_stats;
817 for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
818 buf[bi++] = stats64[i];
819
820 /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
821 stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
822 for (i = 0;
823 i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
824 i++)
825 buf[bi++] = stats64[i];
826
827 /* Fill txf stats into ethtool buffers */
828 bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
829 ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
830 for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
831 if (bmap & 1) {
832 stats64 = (u64 *)&bnad->stats.bna_stats->
833 hw_stats->txf_stats[i];
834 for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
835 sizeof(u64); j++)
836 buf[bi++] = stats64[j];
837 }
838 bmap >>= 1;
839 }
840
841 /* Fill rxf stats into ethtool buffers */
842 bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
843 ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
844 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
845 if (bmap & 1) {
846 stats64 = (u64 *)&bnad->stats.bna_stats->
847 hw_stats->rxf_stats[i];
848 for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
849 sizeof(u64); j++)
850 buf[bi++] = stats64[j];
851 }
852 bmap >>= 1;
853 }
854
855 /* Fill per Q stats into ethtool buffers */
856 bi = bnad_per_q_stats_fill(bnad, buf, bi);
857
858 spin_unlock_irqrestore(&bnad->bna_lock, flags);
859
860 mutex_unlock(&bnad->conf_mutex);
861}
862
863static int
864bnad_get_sset_count(struct net_device *netdev, int sset)
865{
866 switch (sset) {
867 case ETH_SS_STATS:
868 return bnad_get_stats_count_locked(netdev);
869 default:
870 return -EOPNOTSUPP;
871 }
872}
873
874static struct ethtool_ops bnad_ethtool_ops = {
875 .get_settings = bnad_get_settings,
876 .set_settings = bnad_set_settings,
877 .get_drvinfo = bnad_get_drvinfo,
878 .get_wol = bnad_get_wol,
879 .get_link = ethtool_op_get_link,
880 .get_coalesce = bnad_get_coalesce,
881 .set_coalesce = bnad_set_coalesce,
882 .get_ringparam = bnad_get_ringparam,
883 .set_ringparam = bnad_set_ringparam,
884 .get_pauseparam = bnad_get_pauseparam,
885 .set_pauseparam = bnad_set_pauseparam,
886 .get_strings = bnad_get_strings,
887 .get_ethtool_stats = bnad_get_ethtool_stats,
888 .get_sset_count = bnad_get_sset_count
889};
890
891void
892bnad_set_ethtool_ops(struct net_device *netdev)
893{
894 SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
895}
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
new file mode 100644
index 000000000000..a679e038747b
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -0,0 +1,80 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __CNA_H__
20#define __CNA_H__
21
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25#include <linux/delay.h>
26#include <linux/bitops.h>
27#include <linux/timer.h>
28#include <linux/interrupt.h>
29#include <linux/if_ether.h>
30#include <asm/page.h>
31#include <asm/io.h>
32#include <asm/string.h>
33
34#include <linux/list.h>
35
36#define bfa_sm_fault(__event) do { \
37	pr_err("SM Assertion failure: %s: %d: event = %d\n", __FILE__, __LINE__, \
38 __event); \
39} while (0)
40
41extern char bfa_version[];
42
43#define CNA_FW_FILE_CT "ctfw_cna.bin"
44#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
45
46#pragma pack(1)
47
48#define MAC_ADDRLEN (6)
49typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
50
51#pragma pack()
52
53#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
54#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
55#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
56
57/*
58 * bfa_q_qe_init - to initialize a queue element
59 */
60#define bfa_q_qe_init(_qe) { \
61 bfa_q_next(_qe) = (struct list_head *) NULL; \
62 bfa_q_prev(_qe) = (struct list_head *) NULL; \
63}
64
65/*
66 * bfa_q_deq - dequeue an element from head of the queue
67 */
68#define bfa_q_deq(_q, _qe) { \
69 if (!list_empty(_q)) { \
70 (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
71 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
72 (struct list_head *) (_q); \
73 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
74 bfa_q_qe_init(*((struct list_head **) _qe)); \
75 } else { \
76 *((struct list_head **)(_qe)) = NULL; \
77 } \
78}
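
/*
 * Illustrative use, with a hypothetical queue "some_q"; a NULL *qe after
 * the call means the queue was empty:
 *
 *	struct list_head *qe;
 *
 *	bfa_q_deq(&some_q, &qe);
 *	if (qe == NULL)
 *		return;
 */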
79
80#endif /* __CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
new file mode 100644
index 000000000000..e8f4ecd9ebb5
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -0,0 +1,64 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include <linux/firmware.h>
19#include "cna.h"
20
21const struct firmware *bfi_fw;
22static u32 *bfi_image_ct_cna;
23static u32 bfi_image_ct_cna_size;
24
25static u32 *
26cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
27 u32 *bfi_image_size, char *fw_name)
28{
29 const struct firmware *fw;
30
31 if (request_firmware(&fw, fw_name, &pdev->dev)) {
32 pr_alert("Can't locate firmware %s\n", fw_name);
33 goto error;
34 }
35
36 *bfi_image = (u32 *)fw->data;
37 *bfi_image_size = fw->size/sizeof(u32);
38 bfi_fw = fw;
39
40 return *bfi_image;
41error:
42 return NULL;
43}
44
45u32 *
46cna_get_firmware_buf(struct pci_dev *pdev)
47{
48 if (bfi_image_ct_cna_size == 0)
49 cna_read_firmware(pdev, &bfi_image_ct_cna,
50 &bfi_image_ct_cna_size, CNA_FW_FILE_CT);
51 return bfi_image_ct_cna;
52}
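
/*
 * The firmware image loaded above stays cached in bfi_fw for the module
 * lifetime; it is released in bnad_module_exit() (see bnad.c).
 */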
53
54u32 *
55bfa_cb_image_get_chunk(int type, u32 off)
56{
57 return (u32 *)(bfi_image_ct_cna + off);
58}
59
60u32
61bfa_cb_image_get_size(int type)
62{
63 return bfi_image_ct_cna_size;
64}