about summary refs log tree commit diff stats
path: root/drivers/net/bnx2x
diff options
context:
space:
mode:
author    Vladislav Zolotarov <vladz@broadcom.com>    2011-06-13 21:33:39 -0400
committer David S. Miller <davem@conan.davemloft.net> 2011-06-15 10:56:15 -0400
commit 042181f5aa8833a8918e1a91cfaf292146ffc62c (patch)
tree   8011639e79d72485e96285a3668101596528eaee /drivers/net/bnx2x
parent 9ee3d37b05c2fc5c6c01e09dd3bcc4500babf76a (diff)
bnx2x: Created bnx2x_sp
Moved the HSI dependent slow path code to a separate file. Currently it
contains the implementation of MACs, Rx mode, multicast addresses,
indirection table, fast path queue and function configuration code.

Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@conan.davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--drivers/net/bnx2x/Makefile2
-rw-r--r--drivers/net/bnx2x/bnx2x.h6
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h10
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c1
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c824
-rw-r--r--drivers/net/bnx2x/bnx2x_sp.c819
-rw-r--r--drivers/net/bnx2x/bnx2x_sp.h43
8 files changed, 874 insertions(+), 833 deletions(-)
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index bb83a2961273..48fbdd48f88f 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_BNX2X) += bnx2x.o 5obj-$(CONFIG_BNX2X) += bnx2x.o
6 6
7bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o 7bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 3cf0768c890e..6d4d6d4e53c6 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1482,10 +1482,11 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1482 u32 data_hi, u32 data_lo, int common); 1482 u32 data_hi, u32 data_lo, int common);
1483 1483
1484/* Clears multicast and unicast list configuration in the chip. */ 1484/* Clears multicast and unicast list configuration in the chip. */
1485void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
1486void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
1487void bnx2x_invalidate_uc_list(struct bnx2x *bp); 1485void bnx2x_invalidate_uc_list(struct bnx2x *bp);
1488 1486
1487int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
1488 int *state_p, int flags);
1489
1489void bnx2x_update_coalesce(struct bnx2x *bp); 1490void bnx2x_update_coalesce(struct bnx2x *bp);
1490int bnx2x_get_cur_phy_idx(struct bnx2x *bp); 1491int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
1491 1492
@@ -1825,6 +1826,5 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1825BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */ 1826BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
1826 1827
1827extern void bnx2x_set_ethtool_ops(struct net_device *netdev); 1828extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
1828void bnx2x_push_indir_table(struct bnx2x *bp);
1829 1829
1830#endif /* bnx2x.h */ 1830#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index ed1d695b1777..c72e1df04728 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -24,8 +24,8 @@
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/prefetch.h> 25#include <linux/prefetch.h>
26#include "bnx2x_cmn.h" 26#include "bnx2x_cmn.h"
27
28#include "bnx2x_init.h" 27#include "bnx2x_init.h"
28#include "bnx2x_sp.h"
29 29
30static int bnx2x_setup_irqs(struct bnx2x *bp); 30static int bnx2x_setup_irqs(struct bnx2x *bp);
31 31
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 57d9354da617..5a97f92b340c 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -292,13 +292,6 @@ int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
292 */ 292 */
293void bnx2x_set_rx_mode(struct net_device *dev); 293void bnx2x_set_rx_mode(struct net_device *dev);
294 294
295/**
296 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
297 *
298 * @bp: driver handle
299 */
300void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
301
302/* Parity errors related */ 295/* Parity errors related */
303void bnx2x_inc_load_cnt(struct bnx2x *bp); 296void bnx2x_inc_load_cnt(struct bnx2x *bp);
304u32 bnx2x_dec_load_cnt(struct bnx2x *bp); 297u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
@@ -1117,6 +1110,9 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
1117void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1110void bnx2x_acquire_phy_lock(struct bnx2x *bp);
1118void bnx2x_release_phy_lock(struct bnx2x *bp); 1111void bnx2x_release_phy_lock(struct bnx2x *bp);
1119 1112
1113void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
1114 u8 sb_index, u8 disable, u16 usec);
1115
1120/** 1116/**
1121 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. 1117 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
1122 * 1118 *
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index ddb99a9a803d..7a133052660a 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -25,6 +25,7 @@
25#include "bnx2x_cmn.h" 25#include "bnx2x_cmn.h"
26#include "bnx2x_dump.h" 26#include "bnx2x_dump.h"
27#include "bnx2x_init.h" 27#include "bnx2x_init.h"
28#include "bnx2x_sp.h"
28 29
29/* Note: in the format strings below %s is replaced by the queue-name which is 30/* Note: in the format strings below %s is replaced by the queue-name which is
30 * either its index or 'fcoe' for the fcoe queue. Make sure the format string 31 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index d7cab0dc57f8..84f419fcde26 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -56,6 +56,7 @@
56#include "bnx2x_init_ops.h" 56#include "bnx2x_init_ops.h"
57#include "bnx2x_cmn.h" 57#include "bnx2x_cmn.h"
58#include "bnx2x_dcb.h" 58#include "bnx2x_dcb.h"
59#include "bnx2x_sp.h"
59 60
60#include <linux/firmware.h> 61#include <linux/firmware.h>
61#include "bnx2x_fw_file_hdr.h" 62#include "bnx2x_fw_file_hdr.h"
@@ -162,186 +163,11 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
162* General service functions 163* General service functions
163****************************************************************************/ 164****************************************************************************/
164 165
165static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
166 u32 addr, dma_addr_t mapping)
167{
168 REG_WR(bp, addr, U64_LO(mapping));
169 REG_WR(bp, addr + 4, U64_HI(mapping));
170}
171
172static inline void __storm_memset_fill(struct bnx2x *bp,
173 u32 addr, size_t size, u32 val)
174{
175 int i;
176 for (i = 0; i < size/4; i++)
177 REG_WR(bp, addr + (i * 4), val);
178}
179
180static inline void storm_memset_ustats_zero(struct bnx2x *bp,
181 u8 port, u16 stat_id)
182{
183 size_t size = sizeof(struct ustorm_per_client_stats);
184
185 u32 addr = BAR_USTRORM_INTMEM +
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
187
188 __storm_memset_fill(bp, addr, size, 0);
189}
190
191static inline void storm_memset_tstats_zero(struct bnx2x *bp,
192 u8 port, u16 stat_id)
193{
194 size_t size = sizeof(struct tstorm_per_client_stats);
195
196 u32 addr = BAR_TSTRORM_INTMEM +
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
198
199 __storm_memset_fill(bp, addr, size, 0);
200}
201
202static inline void storm_memset_xstats_zero(struct bnx2x *bp,
203 u8 port, u16 stat_id)
204{
205 size_t size = sizeof(struct xstorm_per_client_stats);
206
207 u32 addr = BAR_XSTRORM_INTMEM +
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
209
210 __storm_memset_fill(bp, addr, size, 0);
211}
212
213
214static inline void storm_memset_spq_addr(struct bnx2x *bp,
215 dma_addr_t mapping, u16 abs_fid)
216{
217 u32 addr = XSEM_REG_FAST_MEMORY +
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
219
220 __storm_memset_dma_mapping(bp, addr, mapping);
221}
222
223static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid) 166static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
224{ 167{
225 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov); 168 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
226} 169}
227 170
228static inline void storm_memset_func_cfg(struct bnx2x *bp,
229 struct tstorm_eth_function_common_config *tcfg,
230 u16 abs_fid)
231{
232 size_t size = sizeof(struct tstorm_eth_function_common_config);
233
234 u32 addr = BAR_TSTRORM_INTMEM +
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
236
237 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
238}
239
240static inline void storm_memset_xstats_flags(struct bnx2x *bp,
241 struct stats_indication_flags *flags,
242 u16 abs_fid)
243{
244 size_t size = sizeof(struct stats_indication_flags);
245
246 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
247
248 __storm_memset_struct(bp, addr, size, (u32 *)flags);
249}
250
251static inline void storm_memset_tstats_flags(struct bnx2x *bp,
252 struct stats_indication_flags *flags,
253 u16 abs_fid)
254{
255 size_t size = sizeof(struct stats_indication_flags);
256
257 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
258
259 __storm_memset_struct(bp, addr, size, (u32 *)flags);
260}
261
262static inline void storm_memset_ustats_flags(struct bnx2x *bp,
263 struct stats_indication_flags *flags,
264 u16 abs_fid)
265{
266 size_t size = sizeof(struct stats_indication_flags);
267
268 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
269
270 __storm_memset_struct(bp, addr, size, (u32 *)flags);
271}
272
273static inline void storm_memset_cstats_flags(struct bnx2x *bp,
274 struct stats_indication_flags *flags,
275 u16 abs_fid)
276{
277 size_t size = sizeof(struct stats_indication_flags);
278
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
280
281 __storm_memset_struct(bp, addr, size, (u32 *)flags);
282}
283
284static inline void storm_memset_xstats_addr(struct bnx2x *bp,
285 dma_addr_t mapping, u16 abs_fid)
286{
287 u32 addr = BAR_XSTRORM_INTMEM +
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
289
290 __storm_memset_dma_mapping(bp, addr, mapping);
291}
292
293static inline void storm_memset_tstats_addr(struct bnx2x *bp,
294 dma_addr_t mapping, u16 abs_fid)
295{
296 u32 addr = BAR_TSTRORM_INTMEM +
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
298
299 __storm_memset_dma_mapping(bp, addr, mapping);
300}
301
302static inline void storm_memset_ustats_addr(struct bnx2x *bp,
303 dma_addr_t mapping, u16 abs_fid)
304{
305 u32 addr = BAR_USTRORM_INTMEM +
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
307
308 __storm_memset_dma_mapping(bp, addr, mapping);
309}
310
311static inline void storm_memset_cstats_addr(struct bnx2x *bp,
312 dma_addr_t mapping, u16 abs_fid)
313{
314 u32 addr = BAR_CSTRORM_INTMEM +
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
316
317 __storm_memset_dma_mapping(bp, addr, mapping);
318}
319
320static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
321 u16 pf_id)
322{
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
324 pf_id);
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
326 pf_id);
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
328 pf_id);
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
330 pf_id);
331}
332
333static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
334 u8 enable)
335{
336 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
337 enable);
338 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
339 enable);
340 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
341 enable);
342 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
343 enable);
344}
345 171
346static inline void storm_memset_eq_data(struct bnx2x *bp, 172static inline void storm_memset_eq_data(struct bnx2x *bp,
347 struct event_ring_data *eq_data, 173 struct event_ring_data *eq_data,
@@ -2239,143 +2065,6 @@ static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2239 return true; 2065 return true;
2240} 2066}
2241 2067
2242/* must be called under rtnl_lock */
2243static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2244{
2245 u32 mask = (1 << cl_id);
2246
2247 /* initial seeting is BNX2X_ACCEPT_NONE */
2248 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2249 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2250 u8 unmatched_unicast = 0;
2251
2252 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2253 unmatched_unicast = 1;
2254
2255 if (filters & BNX2X_PROMISCUOUS_MODE) {
2256 /* promiscious - accept all, drop none */
2257 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2258 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2259 if (IS_MF_SI(bp)) {
2260 /*
2261 * SI mode defines to accept in promiscuos mode
2262 * only unmatched packets
2263 */
2264 unmatched_unicast = 1;
2265 accp_all_ucast = 0;
2266 }
2267 }
2268 if (filters & BNX2X_ACCEPT_UNICAST) {
2269 /* accept matched ucast */
2270 drop_all_ucast = 0;
2271 }
2272 if (filters & BNX2X_ACCEPT_MULTICAST)
2273 /* accept matched mcast */
2274 drop_all_mcast = 0;
2275
2276 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2277 /* accept all mcast */
2278 drop_all_ucast = 0;
2279 accp_all_ucast = 1;
2280 }
2281 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2282 /* accept all mcast */
2283 drop_all_mcast = 0;
2284 accp_all_mcast = 1;
2285 }
2286 if (filters & BNX2X_ACCEPT_BROADCAST) {
2287 /* accept (all) bcast */
2288 drop_all_bcast = 0;
2289 accp_all_bcast = 1;
2290 }
2291
2292 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2293 bp->mac_filters.ucast_drop_all | mask :
2294 bp->mac_filters.ucast_drop_all & ~mask;
2295
2296 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2297 bp->mac_filters.mcast_drop_all | mask :
2298 bp->mac_filters.mcast_drop_all & ~mask;
2299
2300 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2301 bp->mac_filters.bcast_drop_all | mask :
2302 bp->mac_filters.bcast_drop_all & ~mask;
2303
2304 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2305 bp->mac_filters.ucast_accept_all | mask :
2306 bp->mac_filters.ucast_accept_all & ~mask;
2307
2308 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2309 bp->mac_filters.mcast_accept_all | mask :
2310 bp->mac_filters.mcast_accept_all & ~mask;
2311
2312 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2313 bp->mac_filters.bcast_accept_all | mask :
2314 bp->mac_filters.bcast_accept_all & ~mask;
2315
2316 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2317 bp->mac_filters.unmatched_unicast | mask :
2318 bp->mac_filters.unmatched_unicast & ~mask;
2319}
2320
2321static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2322{
2323 struct tstorm_eth_function_common_config tcfg = {0};
2324 u16 rss_flgs;
2325
2326 /* tpa */
2327 if (p->func_flgs & FUNC_FLG_TPA)
2328 tcfg.config_flags |=
2329 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2330
2331 /* set rss flags */
2332 rss_flgs = (p->rss->mode <<
2333 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2334
2335 if (p->rss->cap & RSS_IPV4_CAP)
2336 rss_flgs |= RSS_IPV4_CAP_MASK;
2337 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2338 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2339 if (p->rss->cap & RSS_IPV6_CAP)
2340 rss_flgs |= RSS_IPV6_CAP_MASK;
2341 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2342 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2343
2344 tcfg.config_flags |= rss_flgs;
2345 tcfg.rss_result_mask = p->rss->result_mask;
2346
2347 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2348
2349 /* Enable the function in the FW */
2350 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2351 storm_memset_func_en(bp, p->func_id, 1);
2352
2353 /* statistics */
2354 if (p->func_flgs & FUNC_FLG_STATS) {
2355 struct stats_indication_flags stats_flags = {0};
2356 stats_flags.collect_eth = 1;
2357
2358 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2359 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2360
2361 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2362 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2363
2364 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2365 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2366
2367 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2368 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2369 }
2370
2371 /* spq */
2372 if (p->func_flgs & FUNC_FLG_SPQ) {
2373 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2374 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2375 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2376 }
2377}
2378
2379static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp, 2068static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2380 struct bnx2x_fastpath *fp) 2069 struct bnx2x_fastpath *fp)
2381{ 2070{
@@ -4068,7 +3757,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
4068 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); 3757 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4069} 3758}
4070 3759
4071static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id, 3760void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4072 u8 sb_index, u8 disable, u16 usec) 3761 u8 sb_index, u8 disable, u16 usec)
4073{ 3762{
4074 int port = BP_PORT(bp); 3763 int port = BP_PORT(bp);
@@ -4213,20 +3902,6 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
4213 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); 3902 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
4214} 3903}
4215 3904
4216void bnx2x_push_indir_table(struct bnx2x *bp)
4217{
4218 int func = BP_FUNC(bp);
4219 int i;
4220
4221 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4222 return;
4223
4224 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4225 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4226 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4227 bp->fp->cl_id + bp->rx_indir_table[i]);
4228}
4229
4230static void bnx2x_init_ind_table(struct bnx2x *bp) 3905static void bnx2x_init_ind_table(struct bnx2x *bp)
4231{ 3906{
4232 int i; 3907 int i;
@@ -4237,104 +3912,6 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
4237 bnx2x_push_indir_table(bp); 3912 bnx2x_push_indir_table(bp);
4238} 3913}
4239 3914
4240void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4241{
4242 int mode = bp->rx_mode;
4243 int port = BP_PORT(bp);
4244 u16 cl_id;
4245 u32 def_q_filters = 0;
4246
4247 /* All but management unicast packets should pass to the host as well */
4248 u32 llh_mask =
4249 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4250 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4251 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4252 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4253
4254 switch (mode) {
4255 case BNX2X_RX_MODE_NONE: /* no Rx */
4256 def_q_filters = BNX2X_ACCEPT_NONE;
4257#ifdef BCM_CNIC
4258 if (!NO_FCOE(bp)) {
4259 cl_id = bnx2x_fcoe(bp, cl_id);
4260 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4261 }
4262#endif
4263 break;
4264
4265 case BNX2X_RX_MODE_NORMAL:
4266 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4267 BNX2X_ACCEPT_MULTICAST;
4268#ifdef BCM_CNIC
4269 if (!NO_FCOE(bp)) {
4270 cl_id = bnx2x_fcoe(bp, cl_id);
4271 bnx2x_rxq_set_mac_filters(bp, cl_id,
4272 BNX2X_ACCEPT_UNICAST |
4273 BNX2X_ACCEPT_MULTICAST);
4274 }
4275#endif
4276 break;
4277
4278 case BNX2X_RX_MODE_ALLMULTI:
4279 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4280 BNX2X_ACCEPT_ALL_MULTICAST;
4281#ifdef BCM_CNIC
4282 /*
4283 * Prevent duplication of multicast packets by configuring FCoE
4284 * L2 Client to receive only matched unicast frames.
4285 */
4286 if (!NO_FCOE(bp)) {
4287 cl_id = bnx2x_fcoe(bp, cl_id);
4288 bnx2x_rxq_set_mac_filters(bp, cl_id,
4289 BNX2X_ACCEPT_UNICAST);
4290 }
4291#endif
4292 break;
4293
4294 case BNX2X_RX_MODE_PROMISC:
4295 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4296#ifdef BCM_CNIC
4297 /*
4298 * Prevent packets duplication by configuring DROP_ALL for FCoE
4299 * L2 Client.
4300 */
4301 if (!NO_FCOE(bp)) {
4302 cl_id = bnx2x_fcoe(bp, cl_id);
4303 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4304 }
4305#endif
4306 /* pass management unicast packets as well */
4307 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4308 break;
4309
4310 default:
4311 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4312 break;
4313 }
4314
4315 cl_id = BP_L_ID(bp);
4316 bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4317
4318 REG_WR(bp,
4319 (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4320 NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
4321
4322 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4323 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4324 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4325 "unmatched_ucast 0x%x\n", mode,
4326 bp->mac_filters.ucast_drop_all,
4327 bp->mac_filters.mcast_drop_all,
4328 bp->mac_filters.bcast_drop_all,
4329 bp->mac_filters.ucast_accept_all,
4330 bp->mac_filters.mcast_accept_all,
4331 bp->mac_filters.bcast_accept_all,
4332 bp->mac_filters.unmatched_unicast
4333 );
4334
4335 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4336}
4337
4338static void bnx2x_init_internal_common(struct bnx2x *bp) 3915static void bnx2x_init_internal_common(struct bnx2x *bp)
4339{ 3916{
4340 int i; 3917 int i;
@@ -5976,9 +5553,6 @@ alloc_mem_err:
5976/* 5553/*
5977 * Init service functions 5554 * Init service functions
5978 */ 5555 */
5979static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5980 int *state_p, int flags);
5981
5982int bnx2x_func_start(struct bnx2x *bp) 5556int bnx2x_func_start(struct bnx2x *bp)
5983{ 5557{
5984 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1); 5558 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@@ -5997,75 +5571,7 @@ static int bnx2x_func_stop(struct bnx2x *bp)
5997 0, &(bp->state), WAIT_RAMROD_COMMON); 5571 0, &(bp->state), WAIT_RAMROD_COMMON);
5998} 5572}
5999 5573
6000/** 5574int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6001 * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
6002 *
6003 * @bp: driver handle
6004 * @set: set or clear an entry (1 or 0)
6005 * @mac: pointer to a buffer containing a MAC
6006 * @cl_bit_vec: bit vector of clients to register a MAC for
6007 * @cam_offset: offset in a CAM to use
6008 * @is_bcast: is the set MAC a broadcast address (for E1 only)
6009 */
6010static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6011 u32 cl_bit_vec, u8 cam_offset,
6012 u8 is_bcast)
6013{
6014 struct mac_configuration_cmd *config =
6015 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6016 int ramrod_flags = WAIT_RAMROD_COMMON;
6017
6018 bp->set_mac_pending = 1;
6019
6020 config->hdr.length = 1;
6021 config->hdr.offset = cam_offset;
6022 config->hdr.client_id = 0xff;
6023 /* Mark the single MAC configuration ramrod as opposed to a
6024 * UC/MC list configuration).
6025 */
6026 config->hdr.echo = 1;
6027
6028 /* primary MAC */
6029 config->config_table[0].msb_mac_addr =
6030 swab16(*(u16 *)&mac[0]);
6031 config->config_table[0].middle_mac_addr =
6032 swab16(*(u16 *)&mac[2]);
6033 config->config_table[0].lsb_mac_addr =
6034 swab16(*(u16 *)&mac[4]);
6035 config->config_table[0].clients_bit_vector =
6036 cpu_to_le32(cl_bit_vec);
6037 config->config_table[0].vlan_id = 0;
6038 config->config_table[0].pf_id = BP_FUNC(bp);
6039 if (set)
6040 SET_FLAG(config->config_table[0].flags,
6041 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6042 T_ETH_MAC_COMMAND_SET);
6043 else
6044 SET_FLAG(config->config_table[0].flags,
6045 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6046 T_ETH_MAC_COMMAND_INVALIDATE);
6047
6048 if (is_bcast)
6049 SET_FLAG(config->config_table[0].flags,
6050 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6051
6052 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
6053 (set ? "setting" : "clearing"),
6054 config->config_table[0].msb_mac_addr,
6055 config->config_table[0].middle_mac_addr,
6056 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6057
6058 mb();
6059
6060 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6061 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6062 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6063
6064 /* Wait for a completion */
6065 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6066}
6067
6068static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6069 int *state_p, int flags) 5575 int *state_p, int flags)
6070{ 5576{
6071 /* can take a while if any port is running */ 5577 /* can take a while if any port is running */
@@ -6205,164 +5711,6 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6205 } 5711 }
6206} 5712}
6207 5713
6208static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6209{
6210 return CHIP_REV_IS_SLOW(bp) ?
6211 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6212 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6213}
6214
6215/* set mc list, do not wait as wait implies sleep and
6216 * set_rx_mode can be invoked from non-sleepable context.
6217 *
6218 * Instead we use the same ramrod data buffer each time we need
6219 * to configure a list of addresses, and use the fact that the
6220 * list of MACs is changed in an incremental way and that the
6221 * function is called under the netif_addr_lock. A temporary
6222 * inconsistent CAM configuration (possible in case of a very fast
6223 * sequence of add/del/add on the host side) will shortly be
6224 * restored by the handler of the last ramrod.
6225 */
6226static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
6227{
6228 int i = 0, old;
6229 struct net_device *dev = bp->dev;
6230 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6231 struct netdev_hw_addr *ha;
6232 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6233 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6234
6235 if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6236 return -EINVAL;
6237
6238 netdev_for_each_mc_addr(ha, dev) {
6239 /* copy mac */
6240 config_cmd->config_table[i].msb_mac_addr =
6241 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6242 config_cmd->config_table[i].middle_mac_addr =
6243 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6244 config_cmd->config_table[i].lsb_mac_addr =
6245 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6246
6247 config_cmd->config_table[i].vlan_id = 0;
6248 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6249 config_cmd->config_table[i].clients_bit_vector =
6250 cpu_to_le32(1 << BP_L_ID(bp));
6251
6252 SET_FLAG(config_cmd->config_table[i].flags,
6253 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6254 T_ETH_MAC_COMMAND_SET);
6255
6256 DP(NETIF_MSG_IFUP,
6257 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6258 config_cmd->config_table[i].msb_mac_addr,
6259 config_cmd->config_table[i].middle_mac_addr,
6260 config_cmd->config_table[i].lsb_mac_addr);
6261 i++;
6262 }
6263 old = config_cmd->hdr.length;
6264 if (old > i) {
6265 for (; i < old; i++) {
6266 if (CAM_IS_INVALID(config_cmd->
6267 config_table[i])) {
6268 /* already invalidated */
6269 break;
6270 }
6271 /* invalidate */
6272 SET_FLAG(config_cmd->config_table[i].flags,
6273 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6274 T_ETH_MAC_COMMAND_INVALIDATE);
6275 }
6276 }
6277
6278 wmb();
6279
6280 config_cmd->hdr.length = i;
6281 config_cmd->hdr.offset = offset;
6282 config_cmd->hdr.client_id = 0xff;
6283 /* Mark that this ramrod doesn't use bp->set_mac_pending for
6284 * synchronization.
6285 */
6286 config_cmd->hdr.echo = 0;
6287
6288 mb();
6289
6290 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6291 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6292}
6293
6294void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6295{
6296 int i;
6297 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6298 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6299 int ramrod_flags = WAIT_RAMROD_COMMON;
6300 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6301
6302 for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
6303 SET_FLAG(config_cmd->config_table[i].flags,
6304 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6305 T_ETH_MAC_COMMAND_INVALIDATE);
6306
6307 wmb();
6308
6309 config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6310 config_cmd->hdr.offset = offset;
6311 config_cmd->hdr.client_id = 0xff;
6312 /* We'll wait for a completion this time... */
6313 config_cmd->hdr.echo = 1;
6314
6315 bp->set_mac_pending = 1;
6316
6317 mb();
6318
6319 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6320 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6321
6322 /* Wait for a completion */
6323 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6324 ramrod_flags);
6325
6326}
6327
6328/* Accept one or more multicasts */
6329static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6330{
6331 struct net_device *dev = bp->dev;
6332 struct netdev_hw_addr *ha;
6333 u32 mc_filter[MC_HASH_SIZE];
6334 u32 crc, bit, regidx;
6335 int i;
6336
6337 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6338
6339 netdev_for_each_mc_addr(ha, dev) {
6340 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6341 bnx2x_mc_addr(ha));
6342
6343 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6344 ETH_ALEN);
6345 bit = (crc >> 24) & 0xff;
6346 regidx = bit >> 5;
6347 bit &= 0x1f;
6348 mc_filter[regidx] |= (1 << bit);
6349 }
6350
6351 for (i = 0; i < MC_HASH_SIZE; i++)
6352 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6353 mc_filter[i]);
6354
6355 return 0;
6356}
6357
6358void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6359{
6360 int i;
6361
6362 for (i = 0; i < MC_HASH_SIZE; i++)
6363 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6364}
6365
6366#ifdef BCM_CNIC 5714#ifdef BCM_CNIC
6367/** 5715/**
6368 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). 5716 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
@@ -6434,172 +5782,6 @@ int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6434} 5782}
6435#endif 5783#endif
6436 5784
/* Translate the driver's client init parameters into the firmware's
 * client_init_ramrod_data layout (general / Rx / Tx / flow-control
 * sections). @data is fully cleared first, so unset fields are zero.
 * @activate: value programmed into the FW activate flag of the client.
 */
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.is_fcoe_flg =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	/* sizes and ring base addresses are little-endian in the FW */
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	/* approximate-mcast filtering follows the leading-RSS flag */
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}
6524
/* Write CDU validation values into an ETH connection context for @cid
 * (ustorm and xstorm aggregation contexts).
 */
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}
6536
/* Configure coalescing and context validation for a client, optionally
 * zero its storm statistics, fill and post the CLIENT_SETUP ramrod and
 * wait for its completion. Returns the bnx2x_wait_ramrod() result.
 */
static int bnx2x_setup_fw_client(struct bnx2x *bp,
				 struct bnx2x_client_init_params *params,
				 u8 activate,
				 struct client_init_ramrod_data *data,
				 dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();


	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}
6603 5785
6604/** 5786/**
6605 * bnx2x_set_int_mode - configure interrupt mode 5787 * bnx2x_set_int_mode - configure interrupt mode
diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/bnx2x/bnx2x_sp.c
new file mode 100644
index 000000000000..7c876a06b779
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_sp.c
@@ -0,0 +1,819 @@
1#include <linux/version.h>
2#include <linux/module.h>
3#include <linux/crc32.h>
4#include <linux/netdevice.h>
5#include <linux/etherdevice.h>
6#include <linux/crc32c.h>
7#include "bnx2x.h"
8#include "bnx2x_cmn.h"
9#include "bnx2x_sp.h"
10
11
12/**
13 * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
14 *
15 * @bp: driver handle
16 * @set: set or clear an entry (1 or 0)
17 * @mac: pointer to a buffer containing a MAC
18 * @cl_bit_vec: bit vector of clients to register a MAC for
19 * @cam_offset: offset in a CAM to use
20 * @is_bcast: is the set MAC a broadcast address (for E1 only)
21 */
22void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
23 u32 cl_bit_vec, u8 cam_offset,
24 u8 is_bcast)
25{
26 struct mac_configuration_cmd *config =
27 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
28 int ramrod_flags = WAIT_RAMROD_COMMON;
29
30 bp->set_mac_pending = 1;
31
32 config->hdr.length = 1;
33 config->hdr.offset = cam_offset;
34 config->hdr.client_id = 0xff;
35 /* Mark the single MAC configuration ramrod as opposed to a
36 * UC/MC list configuration).
37 */
38 config->hdr.echo = 1;
39
40 /* primary MAC */
41 config->config_table[0].msb_mac_addr =
42 swab16(*(u16 *)&mac[0]);
43 config->config_table[0].middle_mac_addr =
44 swab16(*(u16 *)&mac[2]);
45 config->config_table[0].lsb_mac_addr =
46 swab16(*(u16 *)&mac[4]);
47 config->config_table[0].clients_bit_vector =
48 cpu_to_le32(cl_bit_vec);
49 config->config_table[0].vlan_id = 0;
50 config->config_table[0].pf_id = BP_FUNC(bp);
51 if (set)
52 SET_FLAG(config->config_table[0].flags,
53 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
54 T_ETH_MAC_COMMAND_SET);
55 else
56 SET_FLAG(config->config_table[0].flags,
57 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
58 T_ETH_MAC_COMMAND_INVALIDATE);
59
60 if (is_bcast)
61 SET_FLAG(config->config_table[0].flags,
62 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
63
64 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
65 (set ? "setting" : "clearing"),
66 config->config_table[0].msb_mac_addr,
67 config->config_table[0].middle_mac_addr,
68 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
69
70 mb();
71
72 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
73 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
74 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
75
76 /* Wait for a completion */
77 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
78}
79
80
81static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
82{
83 return CHIP_REV_IS_SLOW(bp) ?
84 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
85 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
86}
87
/* set mc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of a very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 *
 * Returns -EINVAL if the MC list exceeds the CAM range, otherwise
 * the bnx2x_sp_post() result.
 */
int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
		return -EINVAL;

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	/* hdr.length still holds the entry count of the previous ramrod;
	 * invalidate any trailing entries that are no longer in the list.
	 */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	wmb();

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* Mark that this ramrod doesn't use bp->set_mac_pending for
	 * synchronization.
	 */
	config_cmd->hdr.echo = 0;

	mb();

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
166
/* Invalidate the whole E1 multicast CAM range for this port and wait
 * for the SET_MAC ramrod completion (hdr.echo = 1 requests one).
 */
void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;
	u8 offset = bnx2x_e1_cam_mc_offset(bp);

	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	wmb();

	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	/* We'll wait for a completion this time... */
	config_cmd->hdr.echo = 1;

	bp->set_mac_pending = 1;

	mb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
				ramrod_flags);

}
200
201/* Accept one or more multicasts */
202int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
203{
204 struct net_device *dev = bp->dev;
205 struct netdev_hw_addr *ha;
206 u32 mc_filter[MC_HASH_SIZE];
207 u32 crc, bit, regidx;
208 int i;
209
210 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
211
212 netdev_for_each_mc_addr(ha, dev) {
213 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
214 bnx2x_mc_addr(ha));
215
216 crc = crc32c_le(0, bnx2x_mc_addr(ha),
217 ETH_ALEN);
218 bit = (crc >> 24) & 0xff;
219 regidx = bit >> 5;
220 bit &= 0x1f;
221 mc_filter[regidx] |= (1 << bit);
222 }
223
224 for (i = 0; i < MC_HASH_SIZE; i++)
225 REG_WR(bp, MC_HASH_OFFSET(bp, i),
226 mc_filter[i]);
227
228 return 0;
229}
230
231void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
232{
233 int i;
234
235 for (i = 0; i < MC_HASH_SIZE; i++)
236 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
237}
238
239/* must be called under rtnl_lock */
/* Update the @cl_id bit in each of bp->mac_filters' accept/drop masks
 * according to the BNX2X_ACCEPT_* / BNX2X_PROMISCUOUS_MODE @filters.
 * must be called under rtnl_lock
 */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
		unmatched_unicast = 1;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
		if (IS_MF_SI(bp)) {
			/*
			 * SI mode defines to accept in promiscuous mode
			 * only unmatched packets
			 */
			unmatched_unicast = 1;
			accp_all_ucast = 0;
		}
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST)
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	/* fold each decision into this client's bit of the stored masks */
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
317
/* Translate bp->rx_mode into per-client accept/drop filters and the NIG
 * LLH BRB mask, then store the resulting mac_filters via
 * storm_memset_mac_filters(). The FCoE L2 client (when present) gets
 * its own filters to avoid duplicated/unwanted frames.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	int port = BP_PORT(bp);
	u16 cl_id;
	u32 def_q_filters = 0;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		break;

	case BNX2X_RX_MODE_NORMAL:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST |
						  BNX2X_ACCEPT_MULTICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
		/*
		 * Prevent duplication of multicast packets by configuring FCoE
		 * L2 Client to receive only matched unicast frames.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id,
						  BNX2X_ACCEPT_UNICAST);
		}
#endif
		break;

	case BNX2X_RX_MODE_PROMISC:
		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
		/*
		 * Prevent packets duplication by configuring DROP_ALL for FCoE
		 * L2 Client.
		 */
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	cl_id = BP_L_ID(bp);
	bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
		       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
		"unmatched_ucast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all,
		bp->mac_filters.unmatched_unicast
	);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}
415
416/* RSS configuration */
/* Write a 64-bit DMA address into internal memory as two dwords:
 * low half at @addr, high half at @addr + 4.
 */
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}
423
424static inline void __storm_fill(struct bnx2x *bp,
425 u32 addr, size_t size, u32 val)
426{
427 int i;
428 for (i = 0; i < size/4; i++)
429 REG_WR(bp, addr + (i * 4), val);
430}
431
/* Zero this client's USTORM per-counter-id statistics block. */
static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_fill(bp, addr, size, 0);
}
442
/* Zero this client's TSTORM per-counter-id statistics block. */
static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_fill(bp, addr, size, 0);
}
453
/* Zero this client's XSTORM per-counter-id statistics block. */
static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_fill(bp, addr, size, 0);
}
464
465
/* Program the slow-path queue page base address for function @abs_fid. */
static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}
474
/* Write the statistics indication flags into XSTORM for @abs_fid. */
static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}
485
/* Write the statistics indication flags into TSTORM for @abs_fid. */
static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}
496
/* Write the statistics indication flags into USTORM for @abs_fid. */
static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}
507
/* Write the statistics indication flags into CSTORM for @abs_fid. */
static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}
518
/* Program the XSTORM ETH stats query buffer address for @abs_fid. */
static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}
527
/* Program the TSTORM ETH stats query buffer address for @abs_fid. */
static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}
536
/* Program the USTORM ETH stats query buffer address for @abs_fid. */
static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}
545
/* Program the CSTORM ETH stats query buffer address for @abs_fid. */
static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}
554
/* Record the VF-to-PF mapping for @abs_fid in all four storms. */
static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}
567
/* Set the function-enable flag for @abs_fid in all four storms. */
static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
580
/* Copy the common function configuration into TSTORM for @abs_fid. */
static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}
592
/* Program common per-function FW state from @p: TPA and RSS config
 * flags, VF-to-PF mapping and function enable, plus (optionally, per
 * p->func_flgs) statistics collection and the SPQ base/producer.
 */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = {0};
	u16 rss_flgs;

	/* tpa */
	if (p->func_flgs & FUNC_FLG_TPA)
		tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* set rss flags */
	rss_flgs = (p->rss->mode <<
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

	if (p->rss->cap & RSS_IPV4_CAP)
		rss_flgs |= RSS_IPV4_CAP_MASK;
	if (p->rss->cap & RSS_IPV4_TCP_CAP)
		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_CAP)
		rss_flgs |= RSS_IPV6_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_TCP_CAP)
		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

	tcfg.config_flags |= rss_flgs;
	tcfg.rss_result_mask = p->rss->result_mask;

	storm_memset_func_cfg(bp, &tcfg, p->func_id);

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		/* same flags and stats buffer for all four storms */
		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
650
/* Fill a client_init_ramrod_data structure from the driver's client
 * init parameters (general / Rx / Tx / flow-control sections).
 * @data is fully cleared first, so unset fields are zero.
 * @activate: value programmed into the FW activate flag of the client.
 */
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.is_fcoe_flg =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	/* sizes and ring base addresses are little-endian in the FW */
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	/* approximate-mcast filtering follows the leading-RSS flag */
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}
738
739
/* Configure coalescing and context validation for a client, optionally
 * zero its storm statistics, fill and post the CLIENT_SETUP ramrod and
 * wait for its completion. Returns the bnx2x_wait_ramrod() result.
 */
int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();


	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}
806
/* Push bp->rx_indir_table (biased by the leading client id) into the
 * TSTORM RSS indirection table; no-op when RSS is disabled.
 */
void bnx2x_push_indir_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + bp->rx_indir_table[i]);
}
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
new file mode 100644
index 000000000000..f9b755e4a108
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_sp.h
@@ -0,0 +1,43 @@
1#ifndef BNX2X_SP
2#define BNX2X_SP
3
4#include "bnx2x_reg.h"
5
6/* MAC configuration */
7void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
8 u32 cl_bit_vec, u8 cam_offset,
9 u8 is_bcast);
10
11/* Multicast */
12void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
13void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
14int bnx2x_set_e1_mc_list(struct bnx2x *bp);
15int bnx2x_set_e1h_mc_list(struct bnx2x *bp);
16
17/* Rx mode */
18void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
19void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters);
20
21/* RSS configuration */
22void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
23void bnx2x_push_indir_table(struct bnx2x *bp);
24
25/* Queue configuration */
/* Write CDU validation values into an ETH connection context for @cid
 * (ustorm and xstorm aggregation contexts).
 */
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}
37
38int bnx2x_setup_fw_client(struct bnx2x *bp,
39 struct bnx2x_client_init_params *params,
40 u8 activate,
41 struct client_init_ramrod_data *data,
42 dma_addr_t data_mapping);
43#endif /* BNX2X_SP */