author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch) | |
tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/net/bnx2x/bnx2x_cmn.c | |
parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff) |
Diffstat (limited to 'drivers/net/bnx2x/bnx2x_cmn.c')
-rw-r--r-- | drivers/net/bnx2x/bnx2x_cmn.c | 3593 |
1 files changed, 3593 insertions, 0 deletions
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
new file mode 100644
index 00000000000..c4cbf973641
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,3593 @@
1 | /* bnx2x_cmn.c: Broadcom Everest network driver. | ||
2 | * | ||
3 | * Copyright (c) 2007-2011 Broadcom Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation. | ||
8 | * | ||
9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | ||
10 | * Written by: Eliezer Tamir | ||
11 | * Based on code from Michael Chan's bnx2 driver | ||
12 | * UDP CSUM errata workaround by Arik Gendelman | ||
13 | * Slowpath and fastpath rework by Vladislav Zolotarov | ||
14 | * Statistics and Link management by Yitchak Gertner | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/etherdevice.h> | ||
19 | #include <linux/if_vlan.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/ip.h> | ||
22 | #include <net/ipv6.h> | ||
23 | #include <net/ip6_checksum.h> | ||
24 | #include <linux/firmware.h> | ||
25 | #include <linux/prefetch.h> | ||
26 | #include "bnx2x_cmn.h" | ||
27 | #include "bnx2x_init.h" | ||
28 | #include "bnx2x_sp.h" | ||
29 | |||
30 | |||
31 | |||
32 | /** | ||
33 | * bnx2x_bz_fp - zero content of the fastpath structure. | ||
34 | * | ||
35 | * @bp: driver handle | ||
36 | * @index: fastpath index to be zeroed | ||
37 | * | ||
38 | * Makes sure the contents of bp->fp[index].napi are kept | ||
39 | * intact. | ||
40 | */ | ||
41 | static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) | ||
42 | { | ||
43 | struct bnx2x_fastpath *fp = &bp->fp[index]; | ||
44 | struct napi_struct orig_napi = fp->napi; | ||
45 | /* bzero bnx2x_fastpath contents */ | ||
46 | memset(fp, 0, sizeof(*fp)); | ||
47 | |||
48 | /* Restore the NAPI object as it has been already initialized */ | ||
49 | fp->napi = orig_napi; | ||
50 | |||
51 | fp->bp = bp; | ||
52 | fp->index = index; | ||
53 | if (IS_ETH_FP(fp)) | ||
54 | fp->max_cos = bp->max_cos; | ||
55 | else | ||
56 | /* Special queues support only one CoS */ | ||
57 | fp->max_cos = 1; | ||
58 | |||
59 | /* | ||
60 | * set the tpa flag for each queue. The tpa flag determines the queue | ||
61 | * minimal size so it must be set prior to queue memory allocation | ||
62 | */ | ||
63 | fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); | ||
64 | |||
65 | #ifdef BCM_CNIC | ||
66 | /* We don't want TPA on an FCoE L2 ring */ | ||
67 | if (IS_FCOE_FP(fp)) | ||
68 | fp->disable_tpa = 1; | ||
69 | #endif | ||
70 | } | ||
71 | |||
72 | /** | ||
73 | * bnx2x_move_fp - move content of the fastpath structure. | ||
74 | * | ||
75 | * @bp: driver handle | ||
76 | * @from: source FP index | ||
77 | * @to: destination FP index | ||
78 | * | ||
79 | * Makes sure the contents of bp->fp[to].napi are kept | ||
80 | * intact. | ||
81 | */ | ||
82 | static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | ||
83 | { | ||
84 | struct bnx2x_fastpath *from_fp = &bp->fp[from]; | ||
85 | struct bnx2x_fastpath *to_fp = &bp->fp[to]; | ||
86 | struct napi_struct orig_napi = to_fp->napi; | ||
87 | /* Move bnx2x_fastpath contents */ | ||
88 | memcpy(to_fp, from_fp, sizeof(*to_fp)); | ||
89 | to_fp->index = to; | ||
90 | |||
91 | /* Restore the NAPI object as it has been already initialized */ | ||
92 | to_fp->napi = orig_napi; | ||
93 | } | ||
94 | |||
95 | int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ | ||
96 | |||
97 | /* free skb in the packet ring at pos idx | ||
98 | * return idx of last bd freed | ||
99 | */ | ||
100 | static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, | ||
101 | u16 idx) | ||
102 | { | ||
103 | struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; | ||
104 | struct eth_tx_start_bd *tx_start_bd; | ||
105 | struct eth_tx_bd *tx_data_bd; | ||
106 | struct sk_buff *skb = tx_buf->skb; | ||
107 | u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; | ||
108 | int nbd; | ||
109 | |||
110 | /* prefetch skb end pointer to speedup dev_kfree_skb() */ | ||
111 | prefetch(&skb->end); | ||
112 | |||
113 | DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", | ||
114 | txdata->txq_index, idx, tx_buf, skb); | ||
115 | |||
116 | /* unmap first bd */ | ||
117 | DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); | ||
118 | tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; | ||
119 | dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), | ||
120 | BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); | ||
121 | |||
122 | |||
123 | nbd = le16_to_cpu(tx_start_bd->nbd) - 1; | ||
124 | #ifdef BNX2X_STOP_ON_ERROR | ||
125 | if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { | ||
126 | BNX2X_ERR("BAD nbd!\n"); | ||
127 | bnx2x_panic(); | ||
128 | } | ||
129 | #endif | ||
130 | new_cons = nbd + tx_buf->first_bd; | ||
131 | |||
132 | /* Get the next bd */ | ||
133 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
134 | |||
135 | /* Skip a parse bd... */ | ||
136 | --nbd; | ||
137 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
138 | |||
139 | /* ...and the TSO split header bd since they have no mapping */ | ||
140 | if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { | ||
141 | --nbd; | ||
142 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
143 | } | ||
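/* BD chain recap (descriptive note, inferred from the walk above):
 * start BD -> parsing BD -> [optional TSO split header BD] -> data BDs;
 * only the start BD and the data BDs carry DMA mappings, which is why
 * the parsing BD and the split header BD are skipped without unmapping.
 */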
144 | |||
145 | /* now free frags */ | ||
146 | while (nbd > 0) { | ||
147 | |||
148 | DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); | ||
149 | tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; | ||
150 | dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), | ||
151 | BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); | ||
152 | if (--nbd) | ||
153 | bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); | ||
154 | } | ||
155 | |||
156 | /* release skb */ | ||
157 | WARN_ON(!skb); | ||
158 | dev_kfree_skb_any(skb); | ||
159 | tx_buf->first_bd = 0; | ||
160 | tx_buf->skb = NULL; | ||
161 | |||
162 | return new_cons; | ||
163 | } | ||
164 | |||
165 | int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) | ||
166 | { | ||
167 | struct netdev_queue *txq; | ||
168 | u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; | ||
169 | |||
170 | #ifdef BNX2X_STOP_ON_ERROR | ||
171 | if (unlikely(bp->panic)) | ||
172 | return -1; | ||
173 | #endif | ||
174 | |||
175 | txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); | ||
176 | hw_cons = le16_to_cpu(*txdata->tx_cons_sb); | ||
177 | sw_cons = txdata->tx_pkt_cons; | ||
178 | |||
179 | while (sw_cons != hw_cons) { | ||
180 | u16 pkt_cons; | ||
181 | |||
182 | pkt_cons = TX_BD(sw_cons); | ||
183 | |||
184 | DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u " | ||
185 | " pkt_cons %u\n", | ||
186 | txdata->txq_index, hw_cons, sw_cons, pkt_cons); | ||
187 | |||
188 | bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons); | ||
189 | sw_cons++; | ||
190 | } | ||
191 | |||
192 | txdata->tx_pkt_cons = sw_cons; | ||
193 | txdata->tx_bd_cons = bd_cons; | ||
194 | |||
195 | /* Need to make the tx_bd_cons update visible to start_xmit() | ||
196 | * before checking for netif_tx_queue_stopped(). Without the | ||
197 | * memory barrier, there is a small possibility that | ||
198 | * start_xmit() will miss it and cause the queue to be stopped | ||
199 | * forever. | ||
200 | * On the other hand we need an rmb() here to ensure the proper | ||
201 | * ordering of bit testing in the following | ||
202 | * netif_tx_queue_stopped(txq) call. | ||
203 | */ | ||
204 | smp_mb(); | ||
205 | |||
206 | if (unlikely(netif_tx_queue_stopped(txq))) { | ||
207 | /* Taking tx_lock() is needed to prevent reenabling the queue | ||
208 | * while it's empty. This could have happened if rx_action() gets | ||
209 | * suspended in bnx2x_tx_int() after the condition before | ||
210 | * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): | ||
211 | * | ||
212 | * stops the queue->sees fresh tx_bd_cons->releases the queue-> | ||
213 | * sends some packets consuming the whole queue again-> | ||
214 | * stops the queue | ||
215 | */ | ||
216 | |||
217 | __netif_tx_lock(txq, smp_processor_id()); | ||
218 | |||
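/* Wake only if a worst-case packet fits: MAX_SKB_FRAGS data BDs plus
 * (reading the constant below) a start BD, a parsing BD and a TSO
 * split header BD -- MAX_SKB_FRAGS + 3 BDs in total. This is an
 * interpretive note on the threshold, not an authoritative spec.
 */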
219 | if ((netif_tx_queue_stopped(txq)) && | ||
220 | (bp->state == BNX2X_STATE_OPEN) && | ||
221 | (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)) | ||
222 | netif_tx_wake_queue(txq); | ||
223 | |||
224 | __netif_tx_unlock(txq); | ||
225 | } | ||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, | ||
230 | u16 idx) | ||
231 | { | ||
232 | u16 last_max = fp->last_max_sge; | ||
233 | |||
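/* Illustrative example (assuming SUB_S16 is a signed 16-bit subtract):
 * with 16-bit wraparound, idx = 2 and last_max = 65534 give
 * SUB_S16(2, 65534) = 4 > 0, so the new index correctly replaces
 * last_max even though 2 < 65534 numerically.
 */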
234 | if (SUB_S16(idx, last_max) > 0) | ||
235 | fp->last_max_sge = idx; | ||
236 | } | ||
237 | |||
238 | static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | ||
239 | struct eth_fast_path_rx_cqe *fp_cqe) | ||
240 | { | ||
241 | struct bnx2x *bp = fp->bp; | ||
242 | u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - | ||
243 | le16_to_cpu(fp_cqe->len_on_bd)) >> | ||
244 | SGE_PAGE_SHIFT; | ||
245 | u16 last_max, last_elem, first_elem; | ||
246 | u16 delta = 0; | ||
247 | u16 i; | ||
248 | |||
249 | if (!sge_len) | ||
250 | return; | ||
251 | |||
252 | /* First mark all used pages */ | ||
253 | for (i = 0; i < sge_len; i++) | ||
254 | BIT_VEC64_CLEAR_BIT(fp->sge_mask, | ||
255 | RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); | ||
256 | |||
257 | DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", | ||
258 | sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); | ||
259 | |||
260 | /* Here we assume that the last SGE index is the biggest */ | ||
261 | prefetch((void *)(fp->sge_mask)); | ||
262 | bnx2x_update_last_max_sge(fp, | ||
263 | le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); | ||
264 | |||
265 | last_max = RX_SGE(fp->last_max_sge); | ||
266 | last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; | ||
267 | first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; | ||
268 | |||
269 | /* If ring is not full */ | ||
270 | if (last_elem + 1 != first_elem) | ||
271 | last_elem++; | ||
272 | |||
273 | /* Now update the prod */ | ||
274 | for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) { | ||
275 | if (likely(fp->sge_mask[i])) | ||
276 | break; | ||
277 | |||
278 | fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; | ||
279 | delta += BIT_VEC64_ELEM_SZ; | ||
280 | } | ||
281 | |||
282 | if (delta > 0) { | ||
283 | fp->rx_sge_prod += delta; | ||
284 | /* clear page-end entries */ | ||
285 | bnx2x_clear_sge_mask_next_elems(fp); | ||
286 | } | ||
287 | |||
288 | DP(NETIF_MSG_RX_STATUS, | ||
289 | "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", | ||
290 | fp->last_max_sge, fp->rx_sge_prod); | ||
291 | } | ||
292 | |||
293 | static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | ||
294 | struct sk_buff *skb, u16 cons, u16 prod, | ||
295 | struct eth_fast_path_rx_cqe *cqe) | ||
296 | { | ||
297 | struct bnx2x *bp = fp->bp; | ||
298 | struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; | ||
299 | struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; | ||
300 | struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; | ||
301 | dma_addr_t mapping; | ||
302 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; | ||
303 | struct sw_rx_bd *first_buf = &tpa_info->first_buf; | ||
304 | |||
305 | /* print error if current state != stop */ | ||
306 | if (tpa_info->tpa_state != BNX2X_TPA_STOP) | ||
307 | BNX2X_ERR("start of bin not in stop [%d]\n", queue); | ||
308 | |||
309 | /* Try to map an empty skb from the aggregation info */ | ||
310 | mapping = dma_map_single(&bp->pdev->dev, | ||
311 | first_buf->skb->data, | ||
312 | fp->rx_buf_size, DMA_FROM_DEVICE); | ||
313 | /* | ||
314 | * ...if it fails - move the skb from the consumer to the producer | ||
315 | * and set the current aggregation state as ERROR to drop it | ||
316 | * when TPA_STOP arrives. | ||
317 | */ | ||
318 | |||
319 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
320 | /* Move the BD from the consumer to the producer */ | ||
321 | bnx2x_reuse_rx_skb(fp, cons, prod); | ||
322 | tpa_info->tpa_state = BNX2X_TPA_ERROR; | ||
323 | return; | ||
324 | } | ||
325 | |||
326 | /* move empty skb from pool to prod */ | ||
327 | prod_rx_buf->skb = first_buf->skb; | ||
328 | dma_unmap_addr_set(prod_rx_buf, mapping, mapping); | ||
329 | /* point prod_bd to new skb */ | ||
330 | prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
331 | prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
332 | |||
333 | /* move partial skb from cons to pool (don't unmap yet) */ | ||
334 | *first_buf = *cons_rx_buf; | ||
335 | |||
336 | /* mark bin state as START */ | ||
337 | tpa_info->parsing_flags = | ||
338 | le16_to_cpu(cqe->pars_flags.flags); | ||
339 | tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); | ||
340 | tpa_info->tpa_state = BNX2X_TPA_START; | ||
341 | tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); | ||
342 | tpa_info->placement_offset = cqe->placement_offset; | ||
343 | |||
344 | #ifdef BNX2X_STOP_ON_ERROR | ||
345 | fp->tpa_queue_used |= (1 << queue); | ||
346 | #ifdef _ASM_GENERIC_INT_L64_H | ||
347 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", | ||
348 | #else | ||
349 | DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", | ||
350 | #endif | ||
351 | fp->tpa_queue_used); | ||
352 | #endif | ||
353 | } | ||
354 | |||
355 | /* Timestamp option length allowed for TPA aggregation: | ||
356 | * | ||
357 | * nop nop kind length echo val | ||
358 | */ | ||
359 | #define TPA_TSTAMP_OPT_LEN 12 | ||
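/* 12 = 1 (nop) + 1 (nop) + 1 (kind) + 1 (length) + 4 (echo) + 4 (val):
 * the 10-byte TCP timestamp option padded with two NOPs to a 4-byte
 * boundary.
 */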
360 | /** | ||
361 | * bnx2x_set_lro_mss - calculate the approximate value of the MSS | ||
362 | * | ||
363 | * @bp: driver handle | ||
364 | * @parsing_flags: parsing flags from the START CQE | ||
365 | * @len_on_bd: total length of the first packet for the | ||
366 | * aggregation. | ||
367 | * | ||
368 | * Approximate value of the MSS for this aggregation calculated using | ||
369 | * the first packet of it. | ||
370 | */ | ||
371 | static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, | ||
372 | u16 len_on_bd) | ||
373 | { | ||
374 | /* | ||
375 | * A TPA aggregation won't have IP options, TCP options other | ||
376 | * than a timestamp, or IPv6 extension headers. | ||
377 | */ | ||
378 | u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); | ||
379 | |||
380 | if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == | ||
381 | PRS_FLAG_OVERETH_IPV6) | ||
382 | hdrs_len += sizeof(struct ipv6hdr); | ||
383 | else /* IPv4 */ | ||
384 | hdrs_len += sizeof(struct iphdr); | ||
385 | |||
386 | |||
387 | /* Check if there was a TCP timestamp; if there is one, it will | ||
388 | * always be 12 bytes long: nop nop kind length echo val. | ||
389 | * | ||
390 | * Otherwise FW would close the aggregation. | ||
391 | */ | ||
392 | if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) | ||
393 | hdrs_len += TPA_TSTAMP_OPT_LEN; | ||
394 | |||
395 | return len_on_bd - hdrs_len; | ||
396 | } | ||
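/* Worked example (illustrative only): an IPv4 aggregation with a TCP
 * timestamp and len_on_bd = 1514 gives hdrs_len = 14 (ETH_HLEN) +
 * 20 (iphdr) + 20 (tcphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66, so the
 * approximated MSS is 1514 - 66 = 1448.
 */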
397 | |||
398 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
399 | u16 queue, struct sk_buff *skb, | ||
400 | struct eth_end_agg_rx_cqe *cqe, | ||
401 | u16 cqe_idx) | ||
402 | { | ||
403 | struct sw_rx_page *rx_pg, old_rx_pg; | ||
404 | u32 i, frag_len, frag_size, pages; | ||
405 | int err; | ||
406 | int j; | ||
407 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; | ||
408 | u16 len_on_bd = tpa_info->len_on_bd; | ||
409 | |||
410 | frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; | ||
411 | pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; | ||
412 | |||
413 | /* This is needed in order to enable forwarding support */ | ||
414 | if (frag_size) | ||
415 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, | ||
416 | tpa_info->parsing_flags, len_on_bd); | ||
417 | |||
418 | #ifdef BNX2X_STOP_ON_ERROR | ||
419 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { | ||
420 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", | ||
421 | pages, cqe_idx); | ||
422 | BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); | ||
423 | bnx2x_panic(); | ||
424 | return -EINVAL; | ||
425 | } | ||
426 | #endif | ||
427 | |||
428 | /* Run through the SGL and compose the fragmented skb */ | ||
429 | for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { | ||
430 | u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); | ||
431 | |||
432 | /* FW gives the indices of the SGE as if the ring were an array | ||
433 | (meaning that a "next" element consumes 2 indices) */ | ||
434 | frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); | ||
435 | rx_pg = &fp->rx_page_ring[sge_idx]; | ||
436 | old_rx_pg = *rx_pg; | ||
437 | |||
438 | /* If we fail to allocate a substitute page, we simply stop | ||
439 | where we are and drop the whole packet */ | ||
440 | err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); | ||
441 | if (unlikely(err)) { | ||
442 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
443 | return err; | ||
444 | } | ||
445 | |||
446 | /* Unmap the page as we are going to pass it to the stack */ | ||
447 | dma_unmap_page(&bp->pdev->dev, | ||
448 | dma_unmap_addr(&old_rx_pg, mapping), | ||
449 | SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); | ||
450 | |||
451 | /* Add one frag and update the appropriate fields in the skb */ | ||
452 | skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); | ||
453 | |||
454 | skb->data_len += frag_len; | ||
455 | skb->truesize += frag_len; | ||
456 | skb->len += frag_len; | ||
457 | |||
458 | frag_size -= frag_len; | ||
459 | } | ||
460 | |||
461 | return 0; | ||
462 | } | ||
463 | |||
464 | static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | ||
465 | u16 queue, struct eth_end_agg_rx_cqe *cqe, | ||
466 | u16 cqe_idx) | ||
467 | { | ||
468 | struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; | ||
469 | struct sw_rx_bd *rx_buf = &tpa_info->first_buf; | ||
470 | u8 pad = tpa_info->placement_offset; | ||
471 | u16 len = tpa_info->len_on_bd; | ||
472 | struct sk_buff *skb = rx_buf->skb; | ||
473 | /* alloc new skb */ | ||
474 | struct sk_buff *new_skb; | ||
475 | u8 old_tpa_state = tpa_info->tpa_state; | ||
476 | |||
477 | tpa_info->tpa_state = BNX2X_TPA_STOP; | ||
478 | |||
479 | /* If there was an error during the handling of the TPA_START - | ||
480 | * drop this aggregation. | ||
481 | */ | ||
482 | if (old_tpa_state == BNX2X_TPA_ERROR) | ||
483 | goto drop; | ||
484 | |||
485 | /* Try to allocate the new skb */ | ||
486 | new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); | ||
487 | |||
488 | /* Unmap skb in the pool anyway, as we are going to change | ||
489 | pool entry status to BNX2X_TPA_STOP even if new skb allocation | ||
490 | fails. */ | ||
491 | dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), | ||
492 | fp->rx_buf_size, DMA_FROM_DEVICE); | ||
493 | |||
494 | if (likely(new_skb)) { | ||
495 | prefetch(skb); | ||
496 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | ||
497 | |||
498 | #ifdef BNX2X_STOP_ON_ERROR | ||
499 | if (pad + len > fp->rx_buf_size) { | ||
500 | BNX2X_ERR("skb_put is about to fail... " | ||
501 | "pad %d len %d rx_buf_size %d\n", | ||
502 | pad, len, fp->rx_buf_size); | ||
503 | bnx2x_panic(); | ||
504 | return; | ||
505 | } | ||
506 | #endif | ||
507 | |||
508 | skb_reserve(skb, pad); | ||
509 | skb_put(skb, len); | ||
510 | |||
511 | skb->protocol = eth_type_trans(skb, bp->dev); | ||
512 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
513 | |||
514 | if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) { | ||
515 | if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) | ||
516 | __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); | ||
517 | napi_gro_receive(&fp->napi, skb); | ||
518 | } else { | ||
519 | DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" | ||
520 | " - dropping packet!\n"); | ||
521 | dev_kfree_skb_any(skb); | ||
522 | } | ||
523 | |||
524 | |||
525 | /* put new skb in bin */ | ||
526 | rx_buf->skb = new_skb; | ||
527 | |||
528 | return; | ||
529 | } | ||
530 | |||
531 | drop: | ||
532 | /* drop the packet and keep the buffer in the bin */ | ||
533 | DP(NETIF_MSG_RX_STATUS, | ||
534 | "Failed to allocate or map a new skb - dropping packet!\n"); | ||
535 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
536 | } | ||
537 | |||
538 | /* Set Toeplitz hash value in the skb using the value from the | ||
539 | * CQE (calculated by HW). | ||
540 | */ | ||
541 | static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe, | ||
542 | struct sk_buff *skb) | ||
543 | { | ||
544 | /* Set Toeplitz hash from CQE */ | ||
545 | if ((bp->dev->features & NETIF_F_RXHASH) && | ||
546 | (cqe->fast_path_cqe.status_flags & | ||
547 | ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) | ||
548 | skb->rxhash = | ||
549 | le32_to_cpu(cqe->fast_path_cqe.rss_hash_result); | ||
550 | } | ||
551 | |||
552 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | ||
553 | { | ||
554 | struct bnx2x *bp = fp->bp; | ||
555 | u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; | ||
556 | u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; | ||
557 | int rx_pkt = 0; | ||
558 | |||
559 | #ifdef BNX2X_STOP_ON_ERROR | ||
560 | if (unlikely(bp->panic)) | ||
561 | return 0; | ||
562 | #endif | ||
563 | |||
564 | /* The CQ "next element" is the same size as a regular element, | ||
565 | that's why it's ok here */ | ||
566 | hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); | ||
567 | if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
568 | hw_comp_cons++; | ||
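/* The last slot on each RCQ page is a "next page" element, so an
 * index whose low bits equal MAX_RCQ_DESC_CNT is stepped over here
 * rather than treated as a completion (descriptive note).
 */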
569 | |||
570 | bd_cons = fp->rx_bd_cons; | ||
571 | bd_prod = fp->rx_bd_prod; | ||
572 | bd_prod_fw = bd_prod; | ||
573 | sw_comp_cons = fp->rx_comp_cons; | ||
574 | sw_comp_prod = fp->rx_comp_prod; | ||
575 | |||
576 | /* Memory barrier necessary as speculative reads of the rx | ||
577 | * buffer can be ahead of the index in the status block | ||
578 | */ | ||
579 | rmb(); | ||
580 | |||
581 | DP(NETIF_MSG_RX_STATUS, | ||
582 | "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", | ||
583 | fp->index, hw_comp_cons, sw_comp_cons); | ||
584 | |||
585 | while (sw_comp_cons != hw_comp_cons) { | ||
586 | struct sw_rx_bd *rx_buf = NULL; | ||
587 | struct sk_buff *skb; | ||
588 | union eth_rx_cqe *cqe; | ||
589 | struct eth_fast_path_rx_cqe *cqe_fp; | ||
590 | u8 cqe_fp_flags; | ||
591 | enum eth_rx_cqe_type cqe_fp_type; | ||
592 | u16 len, pad; | ||
593 | |||
594 | #ifdef BNX2X_STOP_ON_ERROR | ||
595 | if (unlikely(bp->panic)) | ||
596 | return 0; | ||
597 | #endif | ||
598 | |||
599 | comp_ring_cons = RCQ_BD(sw_comp_cons); | ||
600 | bd_prod = RX_BD(bd_prod); | ||
601 | bd_cons = RX_BD(bd_cons); | ||
602 | |||
603 | /* Prefetch the page containing the BD descriptor | ||
604 | at the producer's index. It will be needed when a new skb is | ||
605 | allocated */ | ||
606 | prefetch((void *)(PAGE_ALIGN((unsigned long) | ||
607 | (&fp->rx_desc_ring[bd_prod])) - | ||
608 | PAGE_SIZE + 1)); | ||
609 | |||
610 | cqe = &fp->rx_comp_ring[comp_ring_cons]; | ||
611 | cqe_fp = &cqe->fast_path_cqe; | ||
612 | cqe_fp_flags = cqe_fp->type_error_flags; | ||
613 | cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; | ||
614 | |||
615 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" | ||
616 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), | ||
617 | cqe_fp_flags, cqe_fp->status_flags, | ||
618 | le32_to_cpu(cqe_fp->rss_hash_result), | ||
619 | le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len)); | ||
620 | |||
621 | /* is this a slowpath msg? */ | ||
622 | if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { | ||
623 | bnx2x_sp_event(fp, cqe); | ||
624 | goto next_cqe; | ||
625 | |||
626 | /* this is an rx packet */ | ||
627 | } else { | ||
628 | rx_buf = &fp->rx_buf_ring[bd_cons]; | ||
629 | skb = rx_buf->skb; | ||
630 | prefetch(skb); | ||
631 | |||
632 | if (!CQE_TYPE_FAST(cqe_fp_type)) { | ||
633 | #ifdef BNX2X_STOP_ON_ERROR | ||
634 | /* sanity check */ | ||
635 | if (fp->disable_tpa && | ||
636 | (CQE_TYPE_START(cqe_fp_type) || | ||
637 | CQE_TYPE_STOP(cqe_fp_type))) | ||
638 | BNX2X_ERR("START/STOP packet while " | ||
639 | "disable_tpa type %x\n", | ||
640 | CQE_TYPE(cqe_fp_type)); | ||
641 | #endif | ||
642 | |||
643 | if (CQE_TYPE_START(cqe_fp_type)) { | ||
644 | u16 queue = cqe_fp->queue_index; | ||
645 | DP(NETIF_MSG_RX_STATUS, | ||
646 | "calling tpa_start on queue %d\n", | ||
647 | queue); | ||
648 | |||
649 | bnx2x_tpa_start(fp, queue, skb, | ||
650 | bd_cons, bd_prod, | ||
651 | cqe_fp); | ||
652 | |||
653 | /* Set Toeplitz hash for LRO skb */ | ||
654 | bnx2x_set_skb_rxhash(bp, cqe, skb); | ||
655 | |||
656 | goto next_rx; | ||
657 | |||
658 | } else { | ||
659 | u16 queue = | ||
660 | cqe->end_agg_cqe.queue_index; | ||
661 | DP(NETIF_MSG_RX_STATUS, | ||
662 | "calling tpa_stop on queue %d\n", | ||
663 | queue); | ||
664 | |||
665 | bnx2x_tpa_stop(bp, fp, queue, | ||
666 | &cqe->end_agg_cqe, | ||
667 | comp_ring_cons); | ||
668 | #ifdef BNX2X_STOP_ON_ERROR | ||
669 | if (bp->panic) | ||
670 | return 0; | ||
671 | #endif | ||
672 | |||
673 | bnx2x_update_sge_prod(fp, cqe_fp); | ||
674 | goto next_cqe; | ||
675 | } | ||
676 | } | ||
677 | /* non TPA */ | ||
678 | len = le16_to_cpu(cqe_fp->pkt_len); | ||
679 | pad = cqe_fp->placement_offset; | ||
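/* Sync only the headers (pad + RX_COPY_THRESH bytes) to the CPU;
 * that is all the copy-break path below reads before deciding
 * whether to copy the packet or pass the whole buffer up
 * (illustrative reading of the sync length).
 */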
680 | dma_sync_single_for_cpu(&bp->pdev->dev, | ||
681 | dma_unmap_addr(rx_buf, mapping), | ||
682 | pad + RX_COPY_THRESH, | ||
683 | DMA_FROM_DEVICE); | ||
684 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | ||
685 | |||
686 | /* is this an error packet? */ | ||
687 | if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { | ||
688 | DP(NETIF_MSG_RX_ERR, | ||
689 | "ERROR flags %x rx packet %u\n", | ||
690 | cqe_fp_flags, sw_comp_cons); | ||
691 | fp->eth_q_stats.rx_err_discard_pkt++; | ||
692 | goto reuse_rx; | ||
693 | } | ||
694 | |||
695 | /* Since we don't have a jumbo ring, | ||
696 | * copy small packets if the MTU is above 1500 | ||
697 | */ | ||
698 | if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && | ||
699 | (len <= RX_COPY_THRESH)) { | ||
700 | struct sk_buff *new_skb; | ||
701 | |||
702 | new_skb = netdev_alloc_skb(bp->dev, len + pad); | ||
703 | if (new_skb == NULL) { | ||
704 | DP(NETIF_MSG_RX_ERR, | ||
705 | "ERROR packet dropped " | ||
706 | "because of alloc failure\n"); | ||
707 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
708 | goto reuse_rx; | ||
709 | } | ||
710 | |||
711 | /* aligned copy */ | ||
712 | skb_copy_from_linear_data_offset(skb, pad, | ||
713 | new_skb->data + pad, len); | ||
714 | skb_reserve(new_skb, pad); | ||
715 | skb_put(new_skb, len); | ||
716 | |||
717 | bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); | ||
718 | |||
719 | skb = new_skb; | ||
720 | |||
721 | } else | ||
722 | if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { | ||
723 | dma_unmap_single(&bp->pdev->dev, | ||
724 | dma_unmap_addr(rx_buf, mapping), | ||
725 | fp->rx_buf_size, | ||
726 | DMA_FROM_DEVICE); | ||
727 | skb_reserve(skb, pad); | ||
728 | skb_put(skb, len); | ||
729 | |||
730 | } else { | ||
731 | DP(NETIF_MSG_RX_ERR, | ||
732 | "ERROR packet dropped because " | ||
733 | "of alloc failure\n"); | ||
734 | fp->eth_q_stats.rx_skb_alloc_failed++; | ||
735 | reuse_rx: | ||
736 | bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod); | ||
737 | goto next_rx; | ||
738 | } | ||
739 | |||
740 | skb->protocol = eth_type_trans(skb, bp->dev); | ||
741 | |||
742 | /* Set Toeplitz hash for a non-LRO skb */ | ||
743 | bnx2x_set_skb_rxhash(bp, cqe, skb); | ||
744 | |||
745 | skb_checksum_none_assert(skb); | ||
746 | |||
747 | if (bp->dev->features & NETIF_F_RXCSUM) { | ||
748 | |||
749 | if (likely(BNX2X_RX_CSUM_OK(cqe))) | ||
750 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
751 | else | ||
752 | fp->eth_q_stats.hw_csum_err++; | ||
753 | } | ||
754 | } | ||
755 | |||
756 | skb_record_rx_queue(skb, fp->index); | ||
757 | |||
758 | if (le16_to_cpu(cqe_fp->pars_flags.flags) & | ||
759 | PARSING_FLAGS_VLAN) | ||
760 | __vlan_hwaccel_put_tag(skb, | ||
761 | le16_to_cpu(cqe_fp->vlan_tag)); | ||
762 | napi_gro_receive(&fp->napi, skb); | ||
763 | |||
764 | |||
765 | next_rx: | ||
766 | rx_buf->skb = NULL; | ||
767 | |||
768 | bd_cons = NEXT_RX_IDX(bd_cons); | ||
769 | bd_prod = NEXT_RX_IDX(bd_prod); | ||
770 | bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); | ||
771 | rx_pkt++; | ||
772 | next_cqe: | ||
773 | sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); | ||
774 | sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); | ||
775 | |||
776 | if (rx_pkt == budget) | ||
777 | break; | ||
778 | } /* while */ | ||
779 | |||
780 | fp->rx_bd_cons = bd_cons; | ||
781 | fp->rx_bd_prod = bd_prod_fw; | ||
782 | fp->rx_comp_cons = sw_comp_cons; | ||
783 | fp->rx_comp_prod = sw_comp_prod; | ||
784 | |||
785 | /* Update producers */ | ||
786 | bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, | ||
787 | fp->rx_sge_prod); | ||
788 | |||
789 | fp->rx_pkt += rx_pkt; | ||
790 | fp->rx_calls++; | ||
791 | |||
792 | return rx_pkt; | ||
793 | } | ||
794 | |||
795 | static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) | ||
796 | { | ||
797 | struct bnx2x_fastpath *fp = fp_cookie; | ||
798 | struct bnx2x *bp = fp->bp; | ||
799 | u8 cos; | ||
800 | |||
801 | DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB " | ||
802 | "[fp %d fw_sd %d igusb %d]\n", | ||
803 | fp->index, fp->fw_sb_id, fp->igu_sb_id); | ||
804 | bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); | ||
805 | |||
806 | #ifdef BNX2X_STOP_ON_ERROR | ||
807 | if (unlikely(bp->panic)) | ||
808 | return IRQ_HANDLED; | ||
809 | #endif | ||
810 | |||
811 | /* Handle Rx and Tx according to MSI-X vector */ | ||
812 | prefetch(fp->rx_cons_sb); | ||
813 | |||
814 | for_each_cos_in_tx_queue(fp, cos) | ||
815 | prefetch(fp->txdata[cos].tx_cons_sb); | ||
816 | |||
817 | prefetch(&fp->sb_running_index[SM_RX_ID]); | ||
818 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | ||
819 | |||
820 | return IRQ_HANDLED; | ||
821 | } | ||
822 | |||
823 | /* HW Lock for shared dual port PHYs */ | ||
824 | void bnx2x_acquire_phy_lock(struct bnx2x *bp) | ||
825 | { | ||
826 | mutex_lock(&bp->port.phy_mutex); | ||
827 | |||
828 | if (bp->port.need_hw_lock) | ||
829 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); | ||
830 | } | ||
831 | |||
832 | void bnx2x_release_phy_lock(struct bnx2x *bp) | ||
833 | { | ||
834 | if (bp->port.need_hw_lock) | ||
835 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); | ||
836 | |||
837 | mutex_unlock(&bp->port.phy_mutex); | ||
838 | } | ||
839 | |||
840 | /* calculates MF speed according to current linespeed and MF configuration */ | ||
841 | u16 bnx2x_get_mf_speed(struct bnx2x *bp) | ||
842 | { | ||
843 | u16 line_speed = bp->link_vars.line_speed; | ||
844 | if (IS_MF(bp)) { | ||
845 | u16 maxCfg = bnx2x_extract_max_cfg(bp, | ||
846 | bp->mf_config[BP_VN(bp)]); | ||
847 | |||
848 | /* Calculate the current MAX line speed limit for the MF | ||
849 | * devices | ||
850 | */ | ||
851 | if (IS_MF_SI(bp)) | ||
852 | line_speed = (line_speed * maxCfg) / 100; | ||
853 | else { /* SD mode */ | ||
854 | u16 vn_max_rate = maxCfg * 100; | ||
855 | |||
856 | if (vn_max_rate < line_speed) | ||
857 | line_speed = vn_max_rate; | ||
858 | } | ||
859 | } | ||
860 | |||
861 | return line_speed; | ||
862 | } | ||
863 | |||
864 | /** | ||
865 | * bnx2x_fill_report_data - fill link report data to report | ||
866 | * | ||
867 | * @bp: driver handle | ||
868 | * @data: link state to update | ||
869 | * | ||
870 | * It uses non-atomic bit operations because it is called under the mutex. | ||
871 | */ | ||
872 | static inline void bnx2x_fill_report_data(struct bnx2x *bp, | ||
873 | struct bnx2x_link_report_data *data) | ||
874 | { | ||
875 | u16 line_speed = bnx2x_get_mf_speed(bp); | ||
876 | |||
877 | memset(data, 0, sizeof(*data)); | ||
878 | |||
879 | /* Fill the report data: effective line speed */ | ||
880 | data->line_speed = line_speed; | ||
881 | |||
882 | /* Link is down */ | ||
883 | if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) | ||
884 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, | ||
885 | &data->link_report_flags); | ||
886 | |||
887 | /* Full DUPLEX */ | ||
888 | if (bp->link_vars.duplex == DUPLEX_FULL) | ||
889 | __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags); | ||
890 | |||
891 | /* Rx Flow Control is ON */ | ||
892 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) | ||
893 | __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags); | ||
894 | |||
895 | /* Tx Flow Control is ON */ | ||
896 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | ||
897 | __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags); | ||
898 | } | ||
899 | |||
900 | /** | ||
901 | * bnx2x_link_report - report link status to OS. | ||
902 | * | ||
903 | * @bp: driver handle | ||
904 | * | ||
905 | * Calls __bnx2x_link_report() under the same locking scheme | ||
906 | * as the link/PHY state management code to ensure consistent link | ||
907 | * reporting. | ||
908 | */ | ||
909 | |||
910 | void bnx2x_link_report(struct bnx2x *bp) | ||
911 | { | ||
912 | bnx2x_acquire_phy_lock(bp); | ||
913 | __bnx2x_link_report(bp); | ||
914 | bnx2x_release_phy_lock(bp); | ||
915 | } | ||
916 | |||
917 | /** | ||
918 | * __bnx2x_link_report - report link status to OS. | ||
919 | * | ||
920 | * @bp: driver handle | ||
921 | * | ||
922 | * Non-atomic implementation. | ||
923 | * Should be called under the phy_lock. | ||
924 | */ | ||
925 | void __bnx2x_link_report(struct bnx2x *bp) | ||
926 | { | ||
927 | struct bnx2x_link_report_data cur_data; | ||
928 | |||
929 | /* reread mf_cfg */ | ||
930 | if (!CHIP_IS_E1(bp)) | ||
931 | bnx2x_read_mf_cfg(bp); | ||
932 | |||
933 | /* Read the current link report info */ | ||
934 | bnx2x_fill_report_data(bp, &cur_data); | ||
935 | |||
936 | /* Don't report link down or exactly the same link status twice */ | ||
937 | if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || | ||
938 | (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, | ||
939 | &bp->last_reported_link.link_report_flags) && | ||
940 | test_bit(BNX2X_LINK_REPORT_LINK_DOWN, | ||
941 | &cur_data.link_report_flags))) | ||
942 | return; | ||
943 | |||
944 | bp->link_cnt++; | ||
945 | |||
946 | /* We are going to report new link parameters now - | ||
947 | * remember the current data for the next time. | ||
948 | */ | ||
949 | memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); | ||
950 | |||
951 | if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, | ||
952 | &cur_data.link_report_flags)) { | ||
953 | netif_carrier_off(bp->dev); | ||
954 | netdev_err(bp->dev, "NIC Link is Down\n"); | ||
955 | return; | ||
956 | } else { | ||
957 | netif_carrier_on(bp->dev); | ||
958 | netdev_info(bp->dev, "NIC Link is Up, "); | ||
959 | pr_cont("%d Mbps ", cur_data.line_speed); | ||
960 | |||
961 | if (test_and_clear_bit(BNX2X_LINK_REPORT_FD, | ||
962 | &cur_data.link_report_flags)) | ||
963 | pr_cont("full duplex"); | ||
964 | else | ||
965 | pr_cont("half duplex"); | ||
966 | |||
967 | /* Handle the FC at the end so that only these flags could | ||
968 | * possibly be set. This way we can easily check whether FC is | ||
969 | * enabled. | ||
970 | */ | ||
971 | if (cur_data.link_report_flags) { | ||
972 | if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON, | ||
973 | &cur_data.link_report_flags)) { | ||
974 | pr_cont(", receive "); | ||
975 | if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON, | ||
976 | &cur_data.link_report_flags)) | ||
977 | pr_cont("& transmit "); | ||
978 | } else { | ||
979 | pr_cont(", transmit "); | ||
980 | } | ||
981 | pr_cont("flow control ON"); | ||
982 | } | ||
983 | pr_cont("\n"); | ||
984 | } | ||
985 | } | ||
986 | |||
987 | void bnx2x_init_rx_rings(struct bnx2x *bp) | ||
988 | { | ||
989 | int func = BP_FUNC(bp); | ||
990 | u16 ring_prod; | ||
991 | int i, j; | ||
992 | |||
993 | /* Allocate TPA resources */ | ||
994 | for_each_rx_queue(bp, j) { | ||
995 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
996 | |||
997 | DP(NETIF_MSG_IFUP, | ||
998 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); | ||
999 | |||
1000 | if (!fp->disable_tpa) { | ||
1001 | /* Fill the per-aggregation pool */ | ||
1002 | for (i = 0; i < MAX_AGG_QS(bp); i++) { | ||
1003 | struct bnx2x_agg_info *tpa_info = | ||
1004 | &fp->tpa_info[i]; | ||
1005 | struct sw_rx_bd *first_buf = | ||
1006 | &tpa_info->first_buf; | ||
1007 | |||
1008 | first_buf->skb = netdev_alloc_skb(bp->dev, | ||
1009 | fp->rx_buf_size); | ||
1010 | if (!first_buf->skb) { | ||
1011 | BNX2X_ERR("Failed to allocate TPA " | ||
1012 | "skb pool for queue[%d] - " | ||
1013 | "disabling TPA on this " | ||
1014 | "queue!\n", j); | ||
1015 | bnx2x_free_tpa_pool(bp, fp, i); | ||
1016 | fp->disable_tpa = 1; | ||
1017 | break; | ||
1018 | } | ||
1019 | dma_unmap_addr_set(first_buf, mapping, 0); | ||
1020 | tpa_info->tpa_state = BNX2X_TPA_STOP; | ||
1021 | } | ||
1022 | |||
1023 | /* "next page" elements initialization */ | ||
1024 | bnx2x_set_next_page_sgl(fp); | ||
1025 | |||
1026 | /* set SGEs bit mask */ | ||
1027 | bnx2x_init_sge_ring_bit_mask(fp); | ||
1028 | |||
1029 | /* Allocate SGEs and initialize the ring elements */ | ||
1030 | for (i = 0, ring_prod = 0; | ||
1031 | i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { | ||
1032 | |||
1033 | if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { | ||
1034 | BNX2X_ERR("was only able to allocate " | ||
1035 | "%d rx sges\n", i); | ||
1036 | BNX2X_ERR("disabling TPA for " | ||
1037 | "queue[%d]\n", j); | ||
1038 | /* Cleanup already allocated elements */ | ||
1039 | bnx2x_free_rx_sge_range(bp, fp, | ||
1040 | ring_prod); | ||
1041 | bnx2x_free_tpa_pool(bp, fp, | ||
1042 | MAX_AGG_QS(bp)); | ||
1043 | fp->disable_tpa = 1; | ||
1044 | ring_prod = 0; | ||
1045 | break; | ||
1046 | } | ||
1047 | ring_prod = NEXT_SGE_IDX(ring_prod); | ||
1048 | } | ||
1049 | |||
1050 | fp->rx_sge_prod = ring_prod; | ||
1051 | } | ||
1052 | } | ||
1053 | |||
1054 | for_each_rx_queue(bp, j) { | ||
1055 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
1056 | |||
1057 | fp->rx_bd_cons = 0; | ||
1058 | |||
1059 | /* Activate BD ring */ | ||
1060 | /* Warning! | ||
1061 | * This will generate an interrupt (to the TSTORM); it | ||
1062 | * must only be done after the chip is initialized. | ||
1063 | */ | ||
1064 | bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, | ||
1065 | fp->rx_sge_prod); | ||
1066 | |||
1067 | if (j != 0) | ||
1068 | continue; | ||
1069 | |||
1070 | if (CHIP_IS_E1(bp)) { | ||
1071 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
1072 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), | ||
1073 | U64_LO(fp->rx_comp_mapping)); | ||
1074 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
1075 | USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, | ||
1076 | U64_HI(fp->rx_comp_mapping)); | ||
1077 | } | ||
1078 | } | ||
1079 | } | ||
1080 | |||
1081 | static void bnx2x_free_tx_skbs(struct bnx2x *bp) | ||
1082 | { | ||
1083 | int i; | ||
1084 | u8 cos; | ||
1085 | |||
1086 | for_each_tx_queue(bp, i) { | ||
1087 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
1088 | for_each_cos_in_tx_queue(fp, cos) { | ||
1089 | struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; | ||
1090 | |||
1091 | u16 bd_cons = txdata->tx_bd_cons; | ||
1092 | u16 sw_prod = txdata->tx_pkt_prod; | ||
1093 | u16 sw_cons = txdata->tx_pkt_cons; | ||
1094 | |||
1095 | while (sw_cons != sw_prod) { | ||
1096 | bd_cons = bnx2x_free_tx_pkt(bp, txdata, | ||
1097 | TX_BD(sw_cons)); | ||
1098 | sw_cons++; | ||
1099 | } | ||
1100 | } | ||
1101 | } | ||
1102 | } | ||
1103 | |||
1104 | static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) | ||
1105 | { | ||
1106 | struct bnx2x *bp = fp->bp; | ||
1107 | int i; | ||
1108 | |||
1109 | /* ring wasn't allocated */ | ||
1110 | if (fp->rx_buf_ring == NULL) | ||
1111 | return; | ||
1112 | |||
1113 | for (i = 0; i < NUM_RX_BD; i++) { | ||
1114 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; | ||
1115 | struct sk_buff *skb = rx_buf->skb; | ||
1116 | |||
1117 | if (skb == NULL) | ||
1118 | continue; | ||
1119 | dma_unmap_single(&bp->pdev->dev, | ||
1120 | dma_unmap_addr(rx_buf, mapping), | ||
1121 | fp->rx_buf_size, DMA_FROM_DEVICE); | ||
1122 | |||
1123 | rx_buf->skb = NULL; | ||
1124 | dev_kfree_skb(skb); | ||
1125 | } | ||
1126 | } | ||
1127 | |||
1128 | static void bnx2x_free_rx_skbs(struct bnx2x *bp) | ||
1129 | { | ||
1130 | int j; | ||
1131 | |||
1132 | for_each_rx_queue(bp, j) { | ||
1133 | struct bnx2x_fastpath *fp = &bp->fp[j]; | ||
1134 | |||
1135 | bnx2x_free_rx_bds(fp); | ||
1136 | |||
1137 | if (!fp->disable_tpa) | ||
1138 | bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); | ||
1139 | } | ||
1140 | } | ||
1141 | |||
1142 | void bnx2x_free_skbs(struct bnx2x *bp) | ||
1143 | { | ||
1144 | bnx2x_free_tx_skbs(bp); | ||
1145 | bnx2x_free_rx_skbs(bp); | ||
1146 | } | ||
1147 | |||
1148 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) | ||
1149 | { | ||
1150 | /* load old values */ | ||
1151 | u32 mf_cfg = bp->mf_config[BP_VN(bp)]; | ||
1152 | |||
1153 | if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { | ||
1154 | /* leave all but MAX value */ | ||
1155 | mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; | ||
1156 | |||
1157 | /* set new MAX value */ | ||
1158 | mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) | ||
1159 | & FUNC_MF_CFG_MAX_BW_MASK; | ||
1160 | |||
1161 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); | ||
1162 | } | ||
1163 | } | ||
1164 | |||
1165 | /** | ||
1166 | * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors | ||
1167 | * | ||
1168 | * @bp: driver handle | ||
1169 | * @nvecs: number of vectors to be released | ||
1170 | */ | ||
1171 | static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) | ||
1172 | { | ||
1173 | int i, offset = 0; | ||
1174 | |||
1175 | if (nvecs == offset) | ||
1176 | return; | ||
1177 | free_irq(bp->msix_table[offset].vector, bp->dev); | ||
1178 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", | ||
1179 | bp->msix_table[offset].vector); | ||
1180 | offset++; | ||
1181 | #ifdef BCM_CNIC | ||
1182 | if (nvecs == offset) | ||
1183 | return; | ||
1184 | offset++; | ||
1185 | #endif | ||
1186 | |||
1187 | for_each_eth_queue(bp, i) { | ||
1188 | if (nvecs == offset) | ||
1189 | return; | ||
1190 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d " | ||
1191 | "irq\n", i, bp->msix_table[offset].vector); | ||
1192 | |||
1193 | free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); | ||
1194 | } | ||
1195 | } | ||
1196 | |||
1197 | void bnx2x_free_irq(struct bnx2x *bp) | ||
1198 | { | ||
1199 | if (bp->flags & USING_MSIX_FLAG) | ||
1200 | bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + | ||
1201 | CNIC_PRESENT + 1); | ||
1202 | else if (bp->flags & USING_MSI_FLAG) | ||
1203 | free_irq(bp->pdev->irq, bp->dev); | ||
1204 | else | ||
1205 | free_irq(bp->pdev->irq, bp->dev); | ||
1206 | } | ||
1207 | |||
1208 | int bnx2x_enable_msix(struct bnx2x *bp) | ||
1209 | { | ||
1210 | int msix_vec = 0, i, rc, req_cnt; | ||
1211 | |||
1212 | bp->msix_table[msix_vec].entry = msix_vec; | ||
1213 | DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", | ||
1214 | bp->msix_table[0].entry); | ||
1215 | msix_vec++; | ||
1216 | |||
1217 | #ifdef BCM_CNIC | ||
1218 | bp->msix_table[msix_vec].entry = msix_vec; | ||
1219 | DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n", | ||
1220 | bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); | ||
1221 | msix_vec++; | ||
1222 | #endif | ||
1223 | /* We need separate vectors for ETH queues only (not FCoE) */ | ||
1224 | for_each_eth_queue(bp, i) { | ||
1225 | bp->msix_table[msix_vec].entry = msix_vec; | ||
1226 | DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " | ||
1227 | "(fastpath #%u)\n", msix_vec, msix_vec, i); | ||
1228 | msix_vec++; | ||
1229 | } | ||
1230 | |||
1231 | req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; | ||
1232 | |||
1233 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); | ||
1234 | |||
1235 | /* | ||
1236 | * reconfigure number of tx/rx queues according to available | ||
1237 | * MSI-X vectors | ||
1238 | */ | ||
1239 | if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { | ||
1240 | /* how many fewer vectors will we have? */ | ||
1241 | int diff = req_cnt - rc; | ||
1242 | |||
1243 | DP(NETIF_MSG_IFUP, | ||
1244 | "Trying to use less MSI-X vectors: %d\n", rc); | ||
1245 | |||
1246 | rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); | ||
1247 | |||
1248 | if (rc) { | ||
1249 | DP(NETIF_MSG_IFUP, | ||
1250 | "MSI-X is not attainable rc %d\n", rc); | ||
1251 | return rc; | ||
1252 | } | ||
1253 | /* | ||
1254 | * decrease number of queues by number of unallocated entries | ||
1255 | */ | ||
1256 | bp->num_queues -= diff; | ||
1257 | |||
1258 | DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", | ||
1259 | bp->num_queues); | ||
1260 | } else if (rc) { | ||
1261 | /* fall back to INTx if not enough memory */ | ||
1262 | if (rc == -ENOMEM) | ||
1263 | bp->flags |= DISABLE_MSI_FLAG; | ||
1264 | DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); | ||
1265 | return rc; | ||
1266 | } | ||
1267 | |||
1268 | bp->flags |= USING_MSIX_FLAG; | ||
1269 | |||
1270 | return 0; | ||
1271 | } | ||
1272 | |||
1273 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) | ||
1274 | { | ||
1275 | int i, rc, offset = 0; | ||
1276 | |||
1277 | rc = request_irq(bp->msix_table[offset++].vector, | ||
1278 | bnx2x_msix_sp_int, 0, | ||
1279 | bp->dev->name, bp->dev); | ||
1280 | if (rc) { | ||
1281 | BNX2X_ERR("request sp irq failed\n"); | ||
1282 | return -EBUSY; | ||
1283 | } | ||
1284 | |||
1285 | #ifdef BCM_CNIC | ||
1286 | offset++; | ||
1287 | #endif | ||
1288 | for_each_eth_queue(bp, i) { | ||
1289 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
1290 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", | ||
1291 | bp->dev->name, i); | ||
1292 | |||
1293 | rc = request_irq(bp->msix_table[offset].vector, | ||
1294 | bnx2x_msix_fp_int, 0, fp->name, fp); | ||
1295 | if (rc) { | ||
1296 | BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i, | ||
1297 | bp->msix_table[offset].vector, rc); | ||
1298 | bnx2x_free_msix_irqs(bp, offset); | ||
1299 | return -EBUSY; | ||
1300 | } | ||
1301 | |||
1302 | offset++; | ||
1303 | } | ||
1304 | |||
1305 | i = BNX2X_NUM_ETH_QUEUES(bp); | ||
1306 | offset = 1 + CNIC_PRESENT; | ||
1307 | netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" | ||
1308 | " ... fp[%d] %d\n", | ||
1309 | bp->msix_table[0].vector, | ||
1310 | 0, bp->msix_table[offset].vector, | ||
1311 | i - 1, bp->msix_table[offset + i - 1].vector); | ||
1312 | |||
1313 | return 0; | ||
1314 | } | ||
1315 | |||
1316 | int bnx2x_enable_msi(struct bnx2x *bp) | ||
1317 | { | ||
1318 | int rc; | ||
1319 | |||
1320 | rc = pci_enable_msi(bp->pdev); | ||
1321 | if (rc) { | ||
1322 | DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); | ||
1323 | return -1; | ||
1324 | } | ||
1325 | bp->flags |= USING_MSI_FLAG; | ||
1326 | |||
1327 | return 0; | ||
1328 | } | ||
1329 | |||
1330 | static int bnx2x_req_irq(struct bnx2x *bp) | ||
1331 | { | ||
1332 | unsigned long flags; | ||
1333 | int rc; | ||
1334 | |||
1335 | if (bp->flags & USING_MSI_FLAG) | ||
1336 | flags = 0; | ||
1337 | else | ||
1338 | flags = IRQF_SHARED; | ||
1339 | |||
1340 | rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, | ||
1341 | bp->dev->name, bp->dev); | ||
1342 | return rc; | ||
1343 | } | ||
1344 | |||
1345 | static inline int bnx2x_setup_irqs(struct bnx2x *bp) | ||
1346 | { | ||
1347 | int rc = 0; | ||
1348 | if (bp->flags & USING_MSIX_FLAG) { | ||
1349 | rc = bnx2x_req_msix_irqs(bp); | ||
1350 | if (rc) | ||
1351 | return rc; | ||
1352 | } else { | ||
1353 | bnx2x_ack_int(bp); | ||
1354 | rc = bnx2x_req_irq(bp); | ||
1355 | if (rc) { | ||
1356 | BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); | ||
1357 | return rc; | ||
1358 | } | ||
1359 | if (bp->flags & USING_MSI_FLAG) { | ||
1360 | bp->dev->irq = bp->pdev->irq; | ||
1361 | netdev_info(bp->dev, "using MSI IRQ %d\n", | ||
1362 | bp->pdev->irq); | ||
1363 | } | ||
1364 | } | ||
1365 | |||
1366 | return 0; | ||
1367 | } | ||
1368 | |||
1369 | static inline void bnx2x_napi_enable(struct bnx2x *bp) | ||
1370 | { | ||
1371 | int i; | ||
1372 | |||
1373 | for_each_rx_queue(bp, i) | ||
1374 | napi_enable(&bnx2x_fp(bp, i, napi)); | ||
1375 | } | ||
1376 | |||
1377 | static inline void bnx2x_napi_disable(struct bnx2x *bp) | ||
1378 | { | ||
1379 | int i; | ||
1380 | |||
1381 | for_each_rx_queue(bp, i) | ||
1382 | napi_disable(&bnx2x_fp(bp, i, napi)); | ||
1383 | } | ||
1384 | |||
1385 | void bnx2x_netif_start(struct bnx2x *bp) | ||
1386 | { | ||
1387 | if (netif_running(bp->dev)) { | ||
1388 | bnx2x_napi_enable(bp); | ||
1389 | bnx2x_int_enable(bp); | ||
1390 | if (bp->state == BNX2X_STATE_OPEN) | ||
1391 | netif_tx_wake_all_queues(bp->dev); | ||
1392 | } | ||
1393 | } | ||
1394 | |||
1395 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | ||
1396 | { | ||
1397 | bnx2x_int_disable_sync(bp, disable_hw); | ||
1398 | bnx2x_napi_disable(bp); | ||
1399 | } | ||
1400 | |||
1401 | u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
1402 | { | ||
1403 | struct bnx2x *bp = netdev_priv(dev); | ||
1404 | |||
1405 | #ifdef BCM_CNIC | ||
1406 | if (!NO_FCOE(bp)) { | ||
1407 | struct ethhdr *hdr = (struct ethhdr *)skb->data; | ||
1408 | u16 ether_type = ntohs(hdr->h_proto); | ||
1409 | |||
1410 | /* Skip VLAN tag if present */ | ||
1411 | if (ether_type == ETH_P_8021Q) { | ||
1412 | struct vlan_ethhdr *vhdr = | ||
1413 | (struct vlan_ethhdr *)skb->data; | ||
1414 | |||
1415 | ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); | ||
1416 | } | ||
1417 | |||
1418 | /* If ethertype is FCoE or FIP - use FCoE ring */ | ||
1419 | if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) | ||
1420 | return bnx2x_fcoe_tx(bp, txq_index); | ||
1421 | } | ||
1422 | #endif | ||
1423 | /* select a non-FCoE queue */ | ||
1424 | return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); | ||
1425 | } | ||
1426 | |||
1427 | void bnx2x_set_num_queues(struct bnx2x *bp) | ||
1428 | { | ||
1429 | switch (bp->multi_mode) { | ||
1430 | case ETH_RSS_MODE_DISABLED: | ||
1431 | bp->num_queues = 1; | ||
1432 | break; | ||
1433 | case ETH_RSS_MODE_REGULAR: | ||
1434 | bp->num_queues = bnx2x_calc_num_queues(bp); | ||
1435 | break; | ||
1436 | |||
1437 | default: | ||
1438 | bp->num_queues = 1; | ||
1439 | break; | ||
1440 | } | ||
1441 | |||
1442 | /* Add special queues */ | ||
1443 | bp->num_queues += NON_ETH_CONTEXT_USE; | ||
1444 | } | ||
1445 | |||
1446 | /** | ||
1447 | * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues | ||
1448 | * | ||
1449 | * @bp: Driver handle | ||
1450 | * | ||
1451 | * We currently support at most 16 Tx queues for each CoS, thus we will | ||
1452 | * allocate a multiple of 16 for ETH L2 rings, according to the value of | ||
1453 | * bp->max_cos. | ||
1454 | * | ||
1455 | * If there is an FCoE L2 queue the appropriate Tx queue will have the next | ||
1456 | * index after all ETH L2 indices. | ||
1457 | * | ||
1458 | * If the actual number of Tx queues (for each CoS) is less than 16, there | ||
1459 | * will be holes at the end of each group of 16 ETH L2 indices (0..15, | ||
1460 | * 16..31, ...) with indices that are not coupled with any real Tx queue. | ||
1461 | * | ||
1462 | * The proper configuration of skb->queue_mapping is handled by | ||
1463 | * bnx2x_select_queue() and __skb_tx_hash(). | ||
1464 | * | ||
1465 | * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() | ||
1466 | * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). | ||
1467 | */ | ||
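/* Illustrative layout (assuming MAX_TXQS_PER_COS == 16): with
 * bp->max_cos = 2 and 8 ETH queues, tx = 32; real Tx queues sit at
 * indices 0..7 (CoS 0) and 16..23 (CoS 1), while 8..15 and 24..31
 * are the holes described above.
 */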
1468 | static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) | ||
1469 | { | ||
1470 | int rc, tx, rx; | ||
1471 | |||
1472 | tx = MAX_TXQS_PER_COS * bp->max_cos; | ||
1473 | rx = BNX2X_NUM_ETH_QUEUES(bp); | ||
1474 | |||
1475 | /* account for fcoe queue */ | ||
1476 | #ifdef BCM_CNIC | ||
1477 | if (!NO_FCOE(bp)) { | ||
1478 | rx += FCOE_PRESENT; | ||
1479 | tx += FCOE_PRESENT; | ||
1480 | } | ||
1481 | #endif | ||
1482 | |||
1483 | rc = netif_set_real_num_tx_queues(bp->dev, tx); | ||
1484 | if (rc) { | ||
1485 | BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc); | ||
1486 | return rc; | ||
1487 | } | ||
1488 | rc = netif_set_real_num_rx_queues(bp->dev, rx); | ||
1489 | if (rc) { | ||
1490 | BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc); | ||
1491 | return rc; | ||
1492 | } | ||
1493 | |||
1494 | DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n", | ||
1495 | tx, rx); | ||
1496 | |||
1497 | return rc; | ||
1498 | } | ||
1499 | |||
1500 | static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) | ||
1501 | { | ||
1502 | int i; | ||
1503 | |||
1504 | for_each_queue(bp, i) { | ||
1505 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
1506 | |||
1507 | /* Always use a mini-jumbo MTU for the FCoE L2 ring */ | ||
1508 | if (IS_FCOE_IDX(i)) | ||
1509 | /* | ||
1510 | * Although there are no IP frames expected to arrive on | ||
1511 | * this ring we still want to add an | ||
1512 | * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer | ||
1513 | * overrun attack. | ||
1514 | */ | ||
1515 | fp->rx_buf_size = | ||
1516 | BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + | ||
1517 | BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; | ||
1518 | else | ||
1519 | fp->rx_buf_size = | ||
1520 | bp->dev->mtu + ETH_OVREHEAD + | ||
1521 | BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; | ||
1522 | } | ||
1523 | } | ||
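/* Note on sizing: rx_buf_size = MTU + ETH_OVREHEAD (the macro's actual
 * spelling in bnx2x.h) + BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
 * the FCoE ring uses a fixed mini-jumbo MTU instead of the netdev MTU.
 */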
1524 | |||
1525 | static inline int bnx2x_init_rss_pf(struct bnx2x *bp) | ||
1526 | { | ||
1527 | int i; | ||
1528 | u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; | ||
1529 | u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); | ||
1530 | |||
1531 | /* | ||
1532 | * Prepare the initial contents of the indirection table if RSS is | ||
1533 | * enabled | ||
1534 | */ | ||
1535 | if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { | ||
1536 | for (i = 0; i < sizeof(ind_table); i++) | ||
1537 | ind_table[i] = | ||
1538 | bp->fp->cl_id + (i % num_eth_queues); | ||
1539 | } | ||
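/* Example: with 4 ETH queues and base client id C = bp->fp->cl_id,
 * the table becomes C, C+1, C+2, C+3, C, C+1, ... repeated across all
 * T_ETH_INDIRECTION_TABLE_SIZE entries.
 */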
1540 | |||
1541 | /* | ||
1542 | * For 57710 and 57711, the SEARCHER configuration (rss_keys) is | ||
1543 | * per-port, so if explicit configuration is needed, do it only | ||
1544 | * for a PMF. | ||
1545 | * | ||
1546 | * For 57712 and newer on the other hand it's a per-function | ||
1547 | * configuration. | ||
1548 | */ | ||
1549 | return bnx2x_config_rss_pf(bp, ind_table, | ||
1550 | bp->port.pmf || !CHIP_IS_E1x(bp)); | ||
1551 | } | ||
1552 | |||
1553 | int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) | ||
1554 | { | ||
1555 | struct bnx2x_config_rss_params params = {0}; | ||
1556 | int i; | ||
1557 | |||
1558 | /* Although RSS is meaningless when there is a single HW queue, we | ||
1559 | * still need it enabled in order to have HW Rx hash generated. | ||
1560 | * | ||
1561 | * if (!is_eth_multi(bp)) | ||
1562 | * bp->multi_mode = ETH_RSS_MODE_DISABLED; | ||
1563 | */ | ||
1564 | |||
1565 | params.rss_obj = &bp->rss_conf_obj; | ||
1566 | |||
1567 | __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); | ||
1568 | |||
1569 | /* RSS mode */ | ||
1570 | switch (bp->multi_mode) { | ||
1571 | case ETH_RSS_MODE_DISABLED: | ||
1572 | __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); | ||
1573 | break; | ||
1574 | case ETH_RSS_MODE_REGULAR: | ||
1575 | __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); | ||
1576 | break; | ||
1577 | case ETH_RSS_MODE_VLAN_PRI: | ||
1578 | __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags); | ||
1579 | break; | ||
1580 | case ETH_RSS_MODE_E1HOV_PRI: | ||
1581 | __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags); | ||
1582 | break; | ||
1583 | case ETH_RSS_MODE_IP_DSCP: | ||
1584 | __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags); | ||
1585 | break; | ||
1586 | default: | ||
1587 | BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode); | ||
1588 | return -EINVAL; | ||
1589 | } | ||
1590 | |||
1591 | /* If RSS is enabled */ | ||
1592 | if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { | ||
1593 | /* RSS configuration */ | ||
1594 | __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); | ||
1595 | __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); | ||
1596 | __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); | ||
1597 | __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); | ||
1598 | |||
1599 | /* Hash bits */ | ||
1600 | params.rss_result_mask = MULTI_MASK; | ||
1601 | |||
1602 | memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); | ||
1603 | |||
1604 | if (config_hash) { | ||
1605 | /* RSS keys */ | ||
1606 | for (i = 0; i < sizeof(params.rss_key) / 4; i++) | ||
1607 | params.rss_key[i] = random32(); | ||
1608 | |||
1609 | __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); | ||
1610 | } | ||
1611 | } | ||
1612 | |||
1613 | return bnx2x_config_rss(bp, ¶ms); | ||
1614 | } | ||
1615 | |||
1616 | static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) | ||
1617 | { | ||
1618 | struct bnx2x_func_state_params func_params = {0}; | ||
1619 | |||
1620 | /* Prepare parameters for function state transitions */ | ||
1621 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
1622 | |||
1623 | func_params.f_obj = &bp->func_obj; | ||
1624 | func_params.cmd = BNX2X_F_CMD_HW_INIT; | ||
1625 | |||
1626 | func_params.params.hw_init.load_phase = load_code; | ||
1627 | |||
1628 | return bnx2x_func_state_change(bp, &func_params); | ||
1629 | } | ||
1630 | |||
1631 | /* | ||
1632 | * Cleans the objects that have internal lists without sending | ||
1633 | * ramrods. Should be run when interrupts are disabled. | ||
1634 | */ | ||
1635 | static void bnx2x_squeeze_objects(struct bnx2x *bp) | ||
1636 | { | ||
1637 | int rc; | ||
1638 | unsigned long ramrod_flags = 0, vlan_mac_flags = 0; | ||
1639 | struct bnx2x_mcast_ramrod_params rparam = {0}; | ||
1640 | struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; | ||
1641 | |||
1642 | /***************** Cleanup MACs' object first *************************/ | ||
1643 | |||
1644 | /* Wait for completion of the requested commands */ | ||
1645 | __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); | ||
1646 | /* Perform a dry cleanup */ | ||
1647 | __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); | ||
1648 | |||
1649 | /* Clean ETH primary MAC */ | ||
1650 | __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); | ||
1651 | rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, | ||
1652 | &ramrod_flags); | ||
1653 | if (rc != 0) | ||
1654 | BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); | ||
1655 | |||
1656 | /* Cleanup UC list */ | ||
1657 | vlan_mac_flags = 0; | ||
1658 | __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); | ||
1659 | rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, | ||
1660 | &ramrod_flags); | ||
1661 | if (rc != 0) | ||
1662 | BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); | ||
1663 | |||
1664 | /***************** Now clean mcast object *****************************/ | ||
1665 | rparam.mcast_obj = &bp->mcast_obj; | ||
1666 | __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); | ||
1667 | |||
1668 | /* Add a DEL command... */ | ||
1669 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); | ||
1670 | if (rc < 0) | ||
1671 | BNX2X_ERR("Failed to add a new DEL command to a multi-cast " | ||
1672 | "object: %d\n", rc); | ||
1673 | |||
1674 | /* ...and wait until all pending commands are cleared */ | ||
1675 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); | ||
1676 | while (rc != 0) { | ||
1677 | if (rc < 0) { | ||
1678 | BNX2X_ERR("Failed to clean multi-cast object: %d\n", | ||
1679 | rc); | ||
1680 | return; | ||
1681 | } | ||
1682 | |||
1683 | rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); | ||
1684 | } | ||
1685 | } | ||
1686 | |||
1687 | #ifndef BNX2X_STOP_ON_ERROR | ||
1688 | #define LOAD_ERROR_EXIT(bp, label) \ | ||
1689 | do { \ | ||
1690 | (bp)->state = BNX2X_STATE_ERROR; \ | ||
1691 | goto label; \ | ||
1692 | } while (0) | ||
1693 | #else | ||
1694 | #define LOAD_ERROR_EXIT(bp, label) \ | ||
1695 | do { \ | ||
1696 | (bp)->state = BNX2X_STATE_ERROR; \ | ||
1697 | (bp)->panic = 1; \ | ||
1698 | return -EBUSY; \ | ||
1699 | } while (0) | ||
1700 | #endif | ||
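/* Example (annotation, not in the original source): with the default build
 * (BNX2X_STOP_ON_ERROR unset), LOAD_ERROR_EXIT(bp, load_error1) expands to
 *
 *	bp->state = BNX2X_STATE_ERROR;
 *	goto load_error1;
 *
 * so the unwind labels at the end of bnx2x_nic_load() release resources.
 * With BNX2X_STOP_ON_ERROR set it instead marks bp->panic and returns
 * -EBUSY immediately, leaving the state intact for debugging.
 */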
1701 | |||
1702 | /* must be called with rtnl_lock */ | ||
1703 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | ||
1704 | { | ||
1705 | int port = BP_PORT(bp); | ||
1706 | u32 load_code; | ||
1707 | int i, rc; | ||
1708 | |||
1709 | #ifdef BNX2X_STOP_ON_ERROR | ||
1710 | if (unlikely(bp->panic)) | ||
1711 | return -EPERM; | ||
1712 | #endif | ||
1713 | |||
1714 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | ||
1715 | |||
1716 | /* Set the initial link reported state to link down */ | ||
1717 | bnx2x_acquire_phy_lock(bp); | ||
1718 | memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); | ||
1719 | __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, | ||
1720 | &bp->last_reported_link.link_report_flags); | ||
1721 | bnx2x_release_phy_lock(bp); | ||
1722 | |||
1723 | /* must be called before memory allocation and HW init */ | ||
1724 | bnx2x_ilt_set_info(bp); | ||
1725 | |||
1726 | /* | ||
1727 | * Zero the fastpath structures while preserving invariants that are | ||
1728 | * set up only once: napi, fp index, max_cos and the bp pointer. | ||
1729 | * Also set fp->disable_tpa. | ||
1730 | */ | ||
1731 | for_each_queue(bp, i) | ||
1732 | bnx2x_bz_fp(bp, i); | ||
1733 | |||
1734 | |||
1735 | /* Set the receive queues buffer size */ | ||
1736 | bnx2x_set_rx_buf_size(bp); | ||
1737 | |||
1738 | if (bnx2x_alloc_mem(bp)) | ||
1739 | return -ENOMEM; | ||
1740 | |||
1741 | /* Since bnx2x_alloc_mem() may update | ||
1742 | * bp->num_queues, bnx2x_set_real_num_queues() must always | ||
1743 | * come after it. | ||
1744 | */ | ||
1745 | rc = bnx2x_set_real_num_queues(bp); | ||
1746 | if (rc) { | ||
1747 | BNX2X_ERR("Unable to set real_num_queues\n"); | ||
1748 | LOAD_ERROR_EXIT(bp, load_error0); | ||
1749 | } | ||
1750 | |||
1751 | /* Configure multi-CoS mappings in the kernel. | ||
1752 | * This configuration may be overridden by a multi-class queue discipline | ||
1753 | * or by a DCBX negotiation result. | ||
1754 | */ | ||
1755 | bnx2x_setup_tc(bp->dev, bp->max_cos); | ||
1756 | |||
1757 | bnx2x_napi_enable(bp); | ||
1758 | |||
1759 | /* Send the LOAD_REQUEST command to the MCP. | ||
1760 | * It returns the type of LOAD command: | ||
1761 | * if this is the first port to be initialized, | ||
1762 | * the common blocks should be initialized as well; otherwise not. | ||
1763 | */ | ||
1764 | if (!BP_NOMCP(bp)) { | ||
1765 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); | ||
1766 | if (!load_code) { | ||
1767 | BNX2X_ERR("MCP response failure, aborting\n"); | ||
1768 | rc = -EBUSY; | ||
1769 | LOAD_ERROR_EXIT(bp, load_error1); | ||
1770 | } | ||
1771 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { | ||
1772 | rc = -EBUSY; /* other port in diagnostic mode */ | ||
1773 | LOAD_ERROR_EXIT(bp, load_error1); | ||
1774 | } | ||
1775 | |||
1776 | } else { | ||
1777 | int path = BP_PATH(bp); | ||
1778 | |||
1779 | DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", | ||
1780 | path, load_count[path][0], load_count[path][1], | ||
1781 | load_count[path][2]); | ||
1782 | load_count[path][0]++; | ||
1783 | load_count[path][1 + port]++; | ||
1784 | DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", | ||
1785 | path, load_count[path][0], load_count[path][1], | ||
1786 | load_count[path][2]); | ||
1787 | if (load_count[path][0] == 1) | ||
1788 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | ||
1789 | else if (load_count[path][1 + port] == 1) | ||
1790 | load_code = FW_MSG_CODE_DRV_LOAD_PORT; | ||
1791 | else | ||
1792 | load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; | ||
1793 | } | ||
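/* Walk-through (annotation, not in the original source): on a fresh path,
 * load_count[path] is {0, 0, 0}. The first function to load (say on port 0)
 * bumps it to {1, 1, 0} and gets LOAD_COMMON; the first function on the
 * other port then sees {2, 1, 1} and gets LOAD_PORT; any later function on
 * an already-initialized port gets LOAD_FUNCTION.
 */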
1794 | |||
1795 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || | ||
1796 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || | ||
1797 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { | ||
1798 | bp->port.pmf = 1; | ||
1799 | /* | ||
1800 | * We need the barrier to ensure the ordering between the | ||
1801 | * writing to bp->port.pmf here and reading it from the | ||
1802 | * bnx2x_periodic_task(). | ||
1803 | */ | ||
1804 | smp_mb(); | ||
1805 | queue_delayed_work(bnx2x_wq, &bp->period_task, 0); | ||
1806 | } else | ||
1807 | bp->port.pmf = 0; | ||
1808 | |||
1809 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | ||
1810 | |||
1811 | /* Init Function state controlling object */ | ||
1812 | bnx2x__init_func_obj(bp); | ||
1813 | |||
1814 | /* Initialize HW */ | ||
1815 | rc = bnx2x_init_hw(bp, load_code); | ||
1816 | if (rc) { | ||
1817 | BNX2X_ERR("HW init failed, aborting\n"); | ||
1818 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); | ||
1819 | LOAD_ERROR_EXIT(bp, load_error2); | ||
1820 | } | ||
1821 | |||
1822 | /* Connect to IRQs */ | ||
1823 | rc = bnx2x_setup_irqs(bp); | ||
1824 | if (rc) { | ||
1825 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); | ||
1826 | LOAD_ERROR_EXIT(bp, load_error2); | ||
1827 | } | ||
1828 | |||
1829 | /* Setup NIC internals and enable interrupts */ | ||
1830 | bnx2x_nic_init(bp, load_code); | ||
1831 | |||
1832 | /* Init per-function objects */ | ||
1833 | bnx2x_init_bp_objs(bp); | ||
1834 | |||
1835 | if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || | ||
1836 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && | ||
1837 | (bp->common.shmem2_base)) { | ||
1838 | if (SHMEM2_HAS(bp, dcc_support)) | ||
1839 | SHMEM2_WR(bp, dcc_support, | ||
1840 | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | | ||
1841 | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); | ||
1842 | } | ||
1843 | |||
1844 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; | ||
1845 | rc = bnx2x_func_start(bp); | ||
1846 | if (rc) { | ||
1847 | BNX2X_ERR("Function start failed!\n"); | ||
1848 | bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); | ||
1849 | LOAD_ERROR_EXIT(bp, load_error3); | ||
1850 | } | ||
1851 | |||
1852 | /* Send LOAD_DONE command to MCP */ | ||
1853 | if (!BP_NOMCP(bp)) { | ||
1854 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); | ||
1855 | if (!load_code) { | ||
1856 | BNX2X_ERR("MCP response failure, aborting\n"); | ||
1857 | rc = -EBUSY; | ||
1858 | LOAD_ERROR_EXIT(bp, load_error3); | ||
1859 | } | ||
1860 | } | ||
1861 | |||
1862 | rc = bnx2x_setup_leading(bp); | ||
1863 | if (rc) { | ||
1864 | BNX2X_ERR("Setup leading failed!\n"); | ||
1865 | LOAD_ERROR_EXIT(bp, load_error3); | ||
1866 | } | ||
1867 | |||
1868 | #ifdef BCM_CNIC | ||
1869 | /* Enable Timer scan */ | ||
1870 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); | ||
1871 | #endif | ||
1872 | |||
1873 | for_each_nondefault_queue(bp, i) { | ||
1874 | rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); | ||
1875 | if (rc) | ||
1876 | LOAD_ERROR_EXIT(bp, load_error4); | ||
1877 | } | ||
1878 | |||
1879 | rc = bnx2x_init_rss_pf(bp); | ||
1880 | if (rc) | ||
1881 | LOAD_ERROR_EXIT(bp, load_error4); | ||
1882 | |||
1883 | /* Now that the clients are configured we are ready to work */ | ||
1884 | bp->state = BNX2X_STATE_OPEN; | ||
1885 | |||
1886 | /* Configure a ucast MAC */ | ||
1887 | rc = bnx2x_set_eth_mac(bp, true); | ||
1888 | if (rc) | ||
1889 | LOAD_ERROR_EXIT(bp, load_error4); | ||
1890 | |||
1891 | if (bp->pending_max) { | ||
1892 | bnx2x_update_max_mf_config(bp, bp->pending_max); | ||
1893 | bp->pending_max = 0; | ||
1894 | } | ||
1895 | |||
1896 | if (bp->port.pmf) | ||
1897 | bnx2x_initial_phy_init(bp, load_mode); | ||
1898 | |||
1899 | /* Start fast path */ | ||
1900 | |||
1901 | /* Initialize Rx filter. */ | ||
1902 | netif_addr_lock_bh(bp->dev); | ||
1903 | bnx2x_set_rx_mode(bp->dev); | ||
1904 | netif_addr_unlock_bh(bp->dev); | ||
1905 | |||
1906 | /* Start the Tx */ | ||
1907 | switch (load_mode) { | ||
1908 | case LOAD_NORMAL: | ||
1909 | /* Tx queues should only be re-enabled */ | ||
1910 | netif_tx_wake_all_queues(bp->dev); | ||
1911 | break; | ||
1912 | |||
1913 | case LOAD_OPEN: | ||
1914 | netif_tx_start_all_queues(bp->dev); | ||
1915 | smp_mb__after_clear_bit(); | ||
1916 | break; | ||
1917 | |||
1918 | case LOAD_DIAG: | ||
1919 | bp->state = BNX2X_STATE_DIAG; | ||
1920 | break; | ||
1921 | |||
1922 | default: | ||
1923 | break; | ||
1924 | } | ||
1925 | |||
1926 | if (!bp->port.pmf) | ||
1927 | bnx2x__link_status_update(bp); | ||
1928 | |||
1929 | /* start the timer */ | ||
1930 | mod_timer(&bp->timer, jiffies + bp->current_interval); | ||
1931 | |||
1932 | #ifdef BCM_CNIC | ||
1933 | bnx2x_setup_cnic_irq_info(bp); | ||
1934 | if (bp->state == BNX2X_STATE_OPEN) | ||
1935 | bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); | ||
1936 | #endif | ||
1937 | bnx2x_inc_load_cnt(bp); | ||
1938 | |||
1939 | /* Wait for all pending SP commands to complete */ | ||
1940 | if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { | ||
1941 | BNX2X_ERR("Timeout waiting for SP elements to complete\n"); | ||
1942 | bnx2x_nic_unload(bp, UNLOAD_CLOSE); | ||
1943 | return -EBUSY; | ||
1944 | } | ||
1945 | |||
1946 | bnx2x_dcbx_init(bp); | ||
1947 | return 0; | ||
1948 | |||
1949 | #ifndef BNX2X_STOP_ON_ERROR | ||
1950 | load_error4: | ||
1951 | #ifdef BCM_CNIC | ||
1952 | /* Disable Timer scan */ | ||
1953 | REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); | ||
1954 | #endif | ||
1955 | load_error3: | ||
1956 | bnx2x_int_disable_sync(bp, 1); | ||
1957 | |||
1958 | /* Clean queueable objects */ | ||
1959 | bnx2x_squeeze_objects(bp); | ||
1960 | |||
1961 | /* Free SKBs, SGEs, TPA pool and driver internals */ | ||
1962 | bnx2x_free_skbs(bp); | ||
1963 | for_each_rx_queue(bp, i) | ||
1964 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | ||
1965 | |||
1966 | /* Release IRQs */ | ||
1967 | bnx2x_free_irq(bp); | ||
1968 | load_error2: | ||
1969 | if (!BP_NOMCP(bp)) { | ||
1970 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); | ||
1971 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); | ||
1972 | } | ||
1973 | |||
1974 | bp->port.pmf = 0; | ||
1975 | load_error1: | ||
1976 | bnx2x_napi_disable(bp); | ||
1977 | load_error0: | ||
1978 | bnx2x_free_mem(bp); | ||
1979 | |||
1980 | return rc; | ||
1981 | #endif /* ! BNX2X_STOP_ON_ERROR */ | ||
1982 | } | ||
1983 | |||
1984 | /* must be called with rtnl_lock */ | ||
1985 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | ||
1986 | { | ||
1987 | int i; | ||
1988 | bool global = false; | ||
1989 | |||
1990 | if ((bp->state == BNX2X_STATE_CLOSED) || | ||
1991 | (bp->state == BNX2X_STATE_ERROR)) { | ||
1992 | /* We can get here if the driver has been unloaded | ||
1993 | * during parity error recovery and is either waiting for a | ||
1994 | * leader to complete or for other functions to unload and | ||
1995 | * then ifdown has been issued. In this case we want to | ||
1996 | * unload and let the other functions complete the recovery | ||
1997 | * process. | ||
1998 | */ | ||
1999 | bp->recovery_state = BNX2X_RECOVERY_DONE; | ||
2000 | bp->is_leader = 0; | ||
2001 | bnx2x_release_leader_lock(bp); | ||
2002 | smp_mb(); | ||
2003 | |||
2004 | DP(NETIF_MSG_HW, "Releasing a leadership...\n"); | ||
2005 | |||
2006 | return -EINVAL; | ||
2007 | } | ||
2008 | |||
2009 | /* | ||
2010 | * It's important to set bp->state to a value different from | ||
2011 | * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() | ||
2012 | * may restart the Tx from the NAPI context (see bnx2x_tx_int()). | ||
2013 | */ | ||
2014 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | ||
2015 | smp_mb(); | ||
2016 | |||
2017 | /* Stop Tx */ | ||
2018 | bnx2x_tx_disable(bp); | ||
2019 | |||
2020 | #ifdef BCM_CNIC | ||
2021 | bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); | ||
2022 | #endif | ||
2023 | |||
2024 | bp->rx_mode = BNX2X_RX_MODE_NONE; | ||
2025 | |||
2026 | del_timer_sync(&bp->timer); | ||
2027 | |||
2028 | /* Set ALWAYS_ALIVE bit in shmem */ | ||
2029 | bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; | ||
2030 | |||
2031 | bnx2x_drv_pulse(bp); | ||
2032 | |||
2033 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | ||
2034 | |||
2035 | /* Cleanup the chip if needed */ | ||
2036 | if (unload_mode != UNLOAD_RECOVERY) | ||
2037 | bnx2x_chip_cleanup(bp, unload_mode); | ||
2038 | else { | ||
2039 | /* Send the UNLOAD_REQUEST to the MCP */ | ||
2040 | bnx2x_send_unload_req(bp, unload_mode); | ||
2041 | |||
2042 | /* | ||
2043 | * Prevent transactions to the host from the functions on the | ||
2044 | * engine that doesn't reset global blocks in case of a global | ||
2045 | * attention once global blocks are reset and gates are opened | ||
2046 | * (the engine whose leader will perform the recovery | ||
2047 | * last). | ||
2048 | */ | ||
2049 | if (!CHIP_IS_E1x(bp)) | ||
2050 | bnx2x_pf_disable(bp); | ||
2051 | |||
2052 | /* Disable HW interrupts, NAPI */ | ||
2053 | bnx2x_netif_stop(bp, 1); | ||
2054 | |||
2055 | /* Release IRQs */ | ||
2056 | bnx2x_free_irq(bp); | ||
2057 | |||
2058 | /* Report UNLOAD_DONE to MCP */ | ||
2059 | bnx2x_send_unload_done(bp); | ||
2060 | } | ||
2061 | |||
2062 | /* | ||
2063 | * At this stage no more interrupts will arrive, so we may safely clean | ||
2064 | * the queueable objects here in case they failed to get cleaned so far. | ||
2065 | */ | ||
2066 | bnx2x_squeeze_objects(bp); | ||
2067 | |||
2068 | /* There should be no more pending SP commands at this stage */ | ||
2069 | bp->sp_state = 0; | ||
2070 | |||
2071 | bp->port.pmf = 0; | ||
2072 | |||
2073 | /* Free SKBs, SGEs, TPA pool and driver internals */ | ||
2074 | bnx2x_free_skbs(bp); | ||
2075 | for_each_rx_queue(bp, i) | ||
2076 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | ||
2077 | |||
2078 | bnx2x_free_mem(bp); | ||
2079 | |||
2080 | bp->state = BNX2X_STATE_CLOSED; | ||
2081 | |||
2082 | /* Check if there are pending parity attentions. If there are - set | ||
2083 | * RECOVERY_IN_PROGRESS. | ||
2084 | */ | ||
2085 | if (bnx2x_chk_parity_attn(bp, &global, false)) { | ||
2086 | bnx2x_set_reset_in_progress(bp); | ||
2087 | |||
2088 | /* Set RESET_IS_GLOBAL if needed */ | ||
2089 | if (global) | ||
2090 | bnx2x_set_reset_global(bp); | ||
2091 | } | ||
2092 | |||
2093 | |||
2094 | /* The last driver must disable "close the gate" if there is no | ||
2095 | * parity attention or "process kill" pending. | ||
2096 | */ | ||
2097 | if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) | ||
2098 | bnx2x_disable_close_the_gate(bp); | ||
2099 | |||
2100 | return 0; | ||
2101 | } | ||
2102 | |||
2103 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | ||
2104 | { | ||
2105 | u16 pmcsr; | ||
2106 | |||
2107 | /* If there is no power capability, silently succeed */ | ||
2108 | if (!bp->pm_cap) { | ||
2109 | DP(NETIF_MSG_HW, "No power capability. Breaking.\n"); | ||
2110 | return 0; | ||
2111 | } | ||
2112 | |||
2113 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); | ||
2114 | |||
2115 | switch (state) { | ||
2116 | case PCI_D0: | ||
2117 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, | ||
2118 | ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | | ||
2119 | PCI_PM_CTRL_PME_STATUS)); | ||
2120 | |||
2121 | if (pmcsr & PCI_PM_CTRL_STATE_MASK) | ||
2122 | /* delay required during transition out of D3hot */ | ||
2123 | msleep(20); | ||
2124 | break; | ||
2125 | |||
2126 | case PCI_D3hot: | ||
2127 | /* If there are other clients above, don't | ||
2128 | shut down the power */ | ||
2129 | if (atomic_read(&bp->pdev->enable_cnt) != 1) | ||
2130 | return 0; | ||
2131 | /* Don't shut down the power for emulation and FPGA */ | ||
2132 | if (CHIP_REV_IS_SLOW(bp)) | ||
2133 | return 0; | ||
2134 | |||
2135 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | ||
2136 | pmcsr |= 3; | ||
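/* Annotation (not in the original source): the two low PMCSR bits select
 * the PCI power state, so OR-ing in 3 requests D3hot.
 */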
2137 | |||
2138 | if (bp->wol) | ||
2139 | pmcsr |= PCI_PM_CTRL_PME_ENABLE; | ||
2140 | |||
2141 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, | ||
2142 | pmcsr); | ||
2143 | |||
2144 | /* No more memory access after this point until | ||
2145 | * device is brought back to D0. | ||
2146 | */ | ||
2147 | break; | ||
2148 | |||
2149 | default: | ||
2150 | return -EINVAL; | ||
2151 | } | ||
2152 | return 0; | ||
2153 | } | ||
2154 | |||
2155 | /* | ||
2156 | * net_device service functions | ||
2157 | */ | ||
2158 | int bnx2x_poll(struct napi_struct *napi, int budget) | ||
2159 | { | ||
2160 | int work_done = 0; | ||
2161 | u8 cos; | ||
2162 | struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, | ||
2163 | napi); | ||
2164 | struct bnx2x *bp = fp->bp; | ||
2165 | |||
2166 | while (1) { | ||
2167 | #ifdef BNX2X_STOP_ON_ERROR | ||
2168 | if (unlikely(bp->panic)) { | ||
2169 | napi_complete(napi); | ||
2170 | return 0; | ||
2171 | } | ||
2172 | #endif | ||
2173 | |||
2174 | for_each_cos_in_tx_queue(fp, cos) | ||
2175 | if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) | ||
2176 | bnx2x_tx_int(bp, &fp->txdata[cos]); | ||
2177 | |||
2178 | |||
2179 | if (bnx2x_has_rx_work(fp)) { | ||
2180 | work_done += bnx2x_rx_int(fp, budget - work_done); | ||
2181 | |||
2182 | /* must not complete if we consumed full budget */ | ||
2183 | if (work_done >= budget) | ||
2184 | break; | ||
2185 | } | ||
2186 | |||
2187 | /* Fall out from the NAPI loop if needed */ | ||
2188 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { | ||
2189 | #ifdef BCM_CNIC | ||
2190 | /* No need to update the SB for the FCoE L2 ring since | ||
2191 | * it's connected to the default SB and the SB | ||
2192 | * has been updated when NAPI was scheduled. | ||
2193 | */ | ||
2194 | if (IS_FCOE_FP(fp)) { | ||
2195 | napi_complete(napi); | ||
2196 | break; | ||
2197 | } | ||
2198 | #endif | ||
2199 | |||
2200 | bnx2x_update_fpsb_idx(fp); | ||
2201 | /* bnx2x_has_rx_work() reads the status block, | ||
2202 | * thus we need to ensure that status block indices | ||
2203 | * have been actually read (bnx2x_update_fpsb_idx) | ||
2204 | * prior to this check (bnx2x_has_rx_work) so that | ||
2205 | * we won't write the "newer" value of the status block | ||
2206 | * to IGU (if there was a DMA right after | ||
2207 | * bnx2x_has_rx_work and if there is no rmb, the memory | ||
2208 | * reading (bnx2x_update_fpsb_idx) may be postponed | ||
2209 | * to right before bnx2x_ack_sb). In this case there | ||
2210 | * will never be another interrupt until there is | ||
2211 | * another update of the status block, while there | ||
2212 | * is still unhandled work. | ||
2213 | */ | ||
2214 | rmb(); | ||
2215 | |||
2216 | if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { | ||
2217 | napi_complete(napi); | ||
2218 | /* Re-enable interrupts */ | ||
2219 | DP(NETIF_MSG_HW, | ||
2220 | "Update index to %d\n", fp->fp_hc_idx); | ||
2221 | bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, | ||
2222 | le16_to_cpu(fp->fp_hc_idx), | ||
2223 | IGU_INT_ENABLE, 1); | ||
2224 | break; | ||
2225 | } | ||
2226 | } | ||
2227 | } | ||
2228 | |||
2229 | return work_done; | ||
2230 | } | ||
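/* For reference (annotation, not in the original source), the general NAPI
 * contract the loop above implements is roughly:
 *
 *	while (work remains) {
 *		work_done += process(budget - work_done);
 *		if (work_done >= budget)
 *			return work_done;	(stay scheduled)
 *	}
 *	napi_complete(napi);
 *	re-enable device interrupts;
 *	return work_done;
 *
 * i.e. interrupts are re-enabled only after napi_complete(), and never
 * when the budget was exhausted.
 */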
2231 | |||
2232 | /* We split the first BD into header and data BDs | ||
2233 | * to ease the pain of our fellow microcode engineers; | ||
2234 | * we use one mapping for both BDs. | ||
2235 | * So far this has only been observed to happen | ||
2236 | * in Other Operating Systems(TM). | ||
2237 | */ | ||
2238 | static noinline u16 bnx2x_tx_split(struct bnx2x *bp, | ||
2239 | struct bnx2x_fp_txdata *txdata, | ||
2240 | struct sw_tx_bd *tx_buf, | ||
2241 | struct eth_tx_start_bd **tx_bd, u16 hlen, | ||
2242 | u16 bd_prod, int nbd) | ||
2243 | { | ||
2244 | struct eth_tx_start_bd *h_tx_bd = *tx_bd; | ||
2245 | struct eth_tx_bd *d_tx_bd; | ||
2246 | dma_addr_t mapping; | ||
2247 | int old_len = le16_to_cpu(h_tx_bd->nbytes); | ||
2248 | |||
2249 | /* first fix first BD */ | ||
2250 | h_tx_bd->nbd = cpu_to_le16(nbd); | ||
2251 | h_tx_bd->nbytes = cpu_to_le16(hlen); | ||
2252 | |||
2253 | DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " | ||
2254 | "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, | ||
2255 | h_tx_bd->addr_lo, h_tx_bd->nbd); | ||
2256 | |||
2257 | /* now get a new data BD | ||
2258 | * (after the pbd) and fill it */ | ||
2259 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
2260 | d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; | ||
2261 | |||
2262 | mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), | ||
2263 | le32_to_cpu(h_tx_bd->addr_lo)) + hlen; | ||
2264 | |||
2265 | d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
2266 | d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
2267 | d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); | ||
2268 | |||
2269 | /* this marks the BD as one that has no individual mapping */ | ||
2270 | tx_buf->flags |= BNX2X_TSO_SPLIT_BD; | ||
2271 | |||
2272 | DP(NETIF_MSG_TX_QUEUED, | ||
2273 | "TSO split data size is %d (%x:%x)\n", | ||
2274 | d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); | ||
2275 | |||
2276 | /* update tx_bd */ | ||
2277 | *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; | ||
2278 | |||
2279 | return bd_prod; | ||
2280 | } | ||
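/* Worked example (annotation, not in the original source): for a TSO frame
 * whose linear part is 1514 bytes with hlen = 66 (14 ETH + 20 IP + 32 TCP
 * with options), the header BD keeps the original mapping with nbytes = 66
 * and the new data BD points at mapping + 66 with nbytes = 1448. Both BDs
 * share one DMA mapping, which is why BNX2X_TSO_SPLIT_BD is set so the
 * unmap path frees that mapping only once.
 */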
2281 | |||
2282 | static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) | ||
2283 | { | ||
2284 | if (fix > 0) | ||
2285 | csum = (u16) ~csum_fold(csum_sub(csum, | ||
2286 | csum_partial(t_header - fix, fix, 0))); | ||
2287 | |||
2288 | else if (fix < 0) | ||
2289 | csum = (u16) ~csum_fold(csum_add(csum, | ||
2290 | csum_partial(t_header, -fix, 0))); | ||
2291 | |||
2292 | return swab16(csum); | ||
2293 | } | ||
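/* Minimal sketch (not part of the driver source) of the 16-bit
 * one's-complement fold that csum_fold() performs on the 32-bit partial
 * sums manipulated above:
 */
static u16 example_csum_fold(u32 sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries once */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold any new carry */
	return (u16)~sum;
}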
2294 | |||
2295 | static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) | ||
2296 | { | ||
2297 | u32 rc; | ||
2298 | |||
2299 | if (skb->ip_summed != CHECKSUM_PARTIAL) | ||
2300 | rc = XMIT_PLAIN; | ||
2301 | |||
2302 | else { | ||
2303 | if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { | ||
2304 | rc = XMIT_CSUM_V6; | ||
2305 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | ||
2306 | rc |= XMIT_CSUM_TCP; | ||
2307 | |||
2308 | } else { | ||
2309 | rc = XMIT_CSUM_V4; | ||
2310 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | ||
2311 | rc |= XMIT_CSUM_TCP; | ||
2312 | } | ||
2313 | } | ||
2314 | |||
2315 | if (skb_is_gso_v6(skb)) | ||
2316 | rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6; | ||
2317 | else if (skb_is_gso(skb)) | ||
2318 | rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP; | ||
2319 | |||
2320 | return rc; | ||
2321 | } | ||
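/* Example (annotation, not in the original source): a CHECKSUM_PARTIAL
 * IPv4 TCP skb yields XMIT_CSUM_V4 | XMIT_CSUM_TCP; if it is also GSO,
 * XMIT_GSO_V4 is OR-ed in as well. A skb that is not CHECKSUM_PARTIAL
 * maps to XMIT_PLAIN regardless of protocol.
 */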
2322 | |||
2323 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) | ||
2324 | /* check if packet requires linearization (packet is too fragmented) | ||
2325 | no need to check fragmentation if page size > 8K (there will be no | ||
2326 | violation of FW restrictions) */ | ||
2327 | static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, | ||
2328 | u32 xmit_type) | ||
2329 | { | ||
2330 | int to_copy = 0; | ||
2331 | int hlen = 0; | ||
2332 | int first_bd_sz = 0; | ||
2333 | |||
2334 | /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ | ||
2335 | if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { | ||
2336 | |||
2337 | if (xmit_type & XMIT_GSO) { | ||
2338 | unsigned short lso_mss = skb_shinfo(skb)->gso_size; | ||
2339 | /* Check if LSO packet needs to be copied: | ||
2340 | 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ | ||
2341 | int wnd_size = MAX_FETCH_BD - 3; | ||
2342 | /* Number of windows to check */ | ||
2343 | int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; | ||
2344 | int wnd_idx = 0; | ||
2345 | int frag_idx = 0; | ||
2346 | u32 wnd_sum = 0; | ||
2347 | |||
2348 | /* Headers length */ | ||
2349 | hlen = (int)(skb_transport_header(skb) - skb->data) + | ||
2350 | tcp_hdrlen(skb); | ||
2351 | |||
2352 | /* Amount of data (w/o headers) on the linear part of the SKB */ | ||
2353 | first_bd_sz = skb_headlen(skb) - hlen; | ||
2354 | |||
2355 | wnd_sum = first_bd_sz; | ||
2356 | |||
2357 | /* Calculate the first sum - it's special */ | ||
2358 | for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) | ||
2359 | wnd_sum += | ||
2360 | skb_shinfo(skb)->frags[frag_idx].size; | ||
2361 | |||
2362 | /* If there was data on linear skb data - check it */ | ||
2363 | if (first_bd_sz > 0) { | ||
2364 | if (unlikely(wnd_sum < lso_mss)) { | ||
2365 | to_copy = 1; | ||
2366 | goto exit_lbl; | ||
2367 | } | ||
2368 | |||
2369 | wnd_sum -= first_bd_sz; | ||
2370 | } | ||
2371 | |||
2372 | /* Others are easier: run through the frag list and | ||
2373 | check all windows */ | ||
2374 | for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { | ||
2375 | wnd_sum += | ||
2376 | skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size; | ||
2377 | |||
2378 | if (unlikely(wnd_sum < lso_mss)) { | ||
2379 | to_copy = 1; | ||
2380 | break; | ||
2381 | } | ||
2382 | wnd_sum -= | ||
2383 | skb_shinfo(skb)->frags[wnd_idx].size; | ||
2384 | } | ||
2385 | } else { | ||
2386 | /* a non-LSO packet that is too fragmented should always | ||
2387 | be linearized */ | ||
2388 | to_copy = 1; | ||
2389 | } | ||
2390 | } | ||
2391 | |||
2392 | exit_lbl: | ||
2393 | if (unlikely(to_copy)) | ||
2394 | DP(NETIF_MSG_TX_QUEUED, | ||
2395 | "Linearization IS REQUIRED for %s packet. " | ||
2396 | "num_frags %d hlen %d first_bd_sz %d\n", | ||
2397 | (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", | ||
2398 | skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); | ||
2399 | |||
2400 | return to_copy; | ||
2401 | } | ||
2402 | #endif | ||
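/* Simplified sketch (not part of the driver source) of the sliding-window
 * test above: the FW can fetch at most wnd_size BDs per LSO window, so
 * every run of wnd_size consecutive fragments must carry at least lso_mss
 * bytes. Unlike the driver code, this sketch ignores the linear part that
 * seeds the first window.
 */
static bool example_needs_linearize(const unsigned int *frag_sz, int nfrags,
				    int wnd_size, unsigned int lso_mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size)		/* slide: drop oldest frag */
			wnd_sum -= frag_sz[i - wnd_size];
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return true;		/* some window is too small */
	}
	return false;
}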
2403 | |||
2404 | static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, | ||
2405 | u32 xmit_type) | ||
2406 | { | ||
2407 | *parsing_data |= (skb_shinfo(skb)->gso_size << | ||
2408 | ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & | ||
2409 | ETH_TX_PARSE_BD_E2_LSO_MSS; | ||
2410 | if ((xmit_type & XMIT_GSO_V6) && | ||
2411 | (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) | ||
2412 | *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; | ||
2413 | } | ||
2414 | |||
2415 | /** | ||
2416 | * bnx2x_set_pbd_gso - update PBD in GSO case. | ||
2417 | * | ||
2418 | * @skb: packet skb | ||
2419 | * @pbd: parse BD | ||
2420 | * @xmit_type: xmit flags | ||
2421 | */ | ||
2422 | static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, | ||
2423 | struct eth_tx_parse_bd_e1x *pbd, | ||
2424 | u32 xmit_type) | ||
2425 | { | ||
2426 | pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | ||
2427 | pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); | ||
2428 | pbd->tcp_flags = pbd_tcp_flags(skb); | ||
2429 | |||
2430 | if (xmit_type & XMIT_GSO_V4) { | ||
2431 | pbd->ip_id = swab16(ip_hdr(skb)->id); | ||
2432 | pbd->tcp_pseudo_csum = | ||
2433 | swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, | ||
2434 | ip_hdr(skb)->daddr, | ||
2435 | 0, IPPROTO_TCP, 0)); | ||
2436 | |||
2437 | } else | ||
2438 | pbd->tcp_pseudo_csum = | ||
2439 | swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | ||
2440 | &ipv6_hdr(skb)->daddr, | ||
2441 | 0, IPPROTO_TCP, 0)); | ||
2442 | |||
2443 | pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN; | ||
2444 | } | ||
2445 | |||
2446 | /** | ||
2447 | * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length | ||
2448 | * | ||
2449 | * @bp: driver handle | ||
2450 | * @skb: packet skb | ||
2451 | * @parsing_data: data to be updated | ||
2452 | * @xmit_type: xmit flags | ||
2453 | * | ||
2454 | * 57712 related | ||
2455 | */ | ||
2456 | static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, | ||
2457 | u32 *parsing_data, u32 xmit_type) | ||
2458 | { | ||
2459 | *parsing_data |= | ||
2460 | ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << | ||
2461 | ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & | ||
2462 | ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; | ||
2463 | |||
2464 | if (xmit_type & XMIT_CSUM_TCP) { | ||
2465 | *parsing_data |= ((tcp_hdrlen(skb) / 4) << | ||
2466 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & | ||
2467 | ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; | ||
2468 | |||
2469 | return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; | ||
2470 | } else | ||
2471 | /* We support checksum offload for TCP and UDP only. | ||
2472 | * No need to pass the UDP header length - it's a constant. | ||
2473 | */ | ||
2474 | return skb_transport_header(skb) + | ||
2475 | sizeof(struct udphdr) - skb->data; | ||
2476 | } | ||
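/* Worked example (annotation, not in the original source): for a standard
 * ETH + IPv4 + TCP frame the transport header starts at byte 34, so the
 * start offset written above is 34 >> 1 = 17 words; a 20-byte TCP header
 * contributes 20 / 4 = 5 dwords, and the returned header length is
 * 34 + 20 = 54 bytes.
 */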
2477 | |||
2478 | static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, | ||
2479 | struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) | ||
2480 | { | ||
2481 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; | ||
2482 | |||
2483 | if (xmit_type & XMIT_CSUM_V4) | ||
2484 | tx_start_bd->bd_flags.as_bitfield |= | ||
2485 | ETH_TX_BD_FLAGS_IP_CSUM; | ||
2486 | else | ||
2487 | tx_start_bd->bd_flags.as_bitfield |= | ||
2488 | ETH_TX_BD_FLAGS_IPV6; | ||
2489 | |||
2490 | if (!(xmit_type & XMIT_CSUM_TCP)) | ||
2491 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; | ||
2492 | } | ||
2493 | |||
2494 | /** | ||
2495 | * bnx2x_set_pbd_csum - update PBD with checksum and return header length | ||
2496 | * | ||
2497 | * @bp: driver handle | ||
2498 | * @skb: packet skb | ||
2499 | * @pbd: parse BD to be updated | ||
2500 | * @xmit_type: xmit flags | ||
2501 | */ | ||
2502 | static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, | ||
2503 | struct eth_tx_parse_bd_e1x *pbd, | ||
2504 | u32 xmit_type) | ||
2505 | { | ||
2506 | u8 hlen = (skb_network_header(skb) - skb->data) >> 1; | ||
2507 | |||
2508 | /* for now NS flag is not used in Linux */ | ||
2509 | pbd->global_data = | ||
2510 | (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << | ||
2511 | ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); | ||
2512 | |||
2513 | pbd->ip_hlen_w = (skb_transport_header(skb) - | ||
2514 | skb_network_header(skb)) >> 1; | ||
2515 | |||
2516 | hlen += pbd->ip_hlen_w; | ||
2517 | |||
2518 | /* We support checksum offload for TCP and UDP only */ | ||
2519 | if (xmit_type & XMIT_CSUM_TCP) | ||
2520 | hlen += tcp_hdrlen(skb) / 2; | ||
2521 | else | ||
2522 | hlen += sizeof(struct udphdr) / 2; | ||
2523 | |||
2524 | pbd->total_hlen_w = cpu_to_le16(hlen); | ||
2525 | hlen = hlen*2; | ||
2526 | |||
2527 | if (xmit_type & XMIT_CSUM_TCP) { | ||
2528 | pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); | ||
2529 | |||
2530 | } else { | ||
2531 | s8 fix = SKB_CS_OFF(skb); /* signed! */ | ||
2532 | |||
2533 | DP(NETIF_MSG_TX_QUEUED, | ||
2534 | "hlen %d fix %d csum before fix %x\n", | ||
2535 | le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); | ||
2536 | |||
2537 | /* HW bug: fixup the CSUM */ | ||
2538 | pbd->tcp_pseudo_csum = | ||
2539 | bnx2x_csum_fix(skb_transport_header(skb), | ||
2540 | SKB_CS(skb), fix); | ||
2541 | |||
2542 | DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", | ||
2543 | pbd->tcp_pseudo_csum); | ||
2544 | } | ||
2545 | |||
2546 | return hlen; | ||
2547 | } | ||
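/* Worked example (annotation, not in the original source): for the same
 * ETH + IPv4 + TCP frame, hlen starts at 14 >> 1 = 7 words of L2 header,
 * ip_hlen_w adds 20 >> 1 = 10 words and the TCP header 20 / 2 = 10 more,
 * so total_hlen_w = 27 words and the function returns 54 bytes.
 */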
2548 | |||
2549 | /* called with netif_tx_lock | ||
2550 | * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call | ||
2551 | * netif_wake_queue() | ||
2552 | */ | ||
2553 | netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
2554 | { | ||
2555 | struct bnx2x *bp = netdev_priv(dev); | ||
2556 | |||
2557 | struct bnx2x_fastpath *fp; | ||
2558 | struct netdev_queue *txq; | ||
2559 | struct bnx2x_fp_txdata *txdata; | ||
2560 | struct sw_tx_bd *tx_buf; | ||
2561 | struct eth_tx_start_bd *tx_start_bd, *first_bd; | ||
2562 | struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; | ||
2563 | struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; | ||
2564 | struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; | ||
2565 | u32 pbd_e2_parsing_data = 0; | ||
2566 | u16 pkt_prod, bd_prod; | ||
2567 | int nbd, txq_index, fp_index, txdata_index; | ||
2568 | dma_addr_t mapping; | ||
2569 | u32 xmit_type = bnx2x_xmit_type(bp, skb); | ||
2570 | int i; | ||
2571 | u8 hlen = 0; | ||
2572 | __le16 pkt_size = 0; | ||
2573 | struct ethhdr *eth; | ||
2574 | u8 mac_type = UNICAST_ADDRESS; | ||
2575 | |||
2576 | #ifdef BNX2X_STOP_ON_ERROR | ||
2577 | if (unlikely(bp->panic)) | ||
2578 | return NETDEV_TX_BUSY; | ||
2579 | #endif | ||
2580 | |||
2581 | txq_index = skb_get_queue_mapping(skb); | ||
2582 | txq = netdev_get_tx_queue(dev, txq_index); | ||
2583 | |||
2584 | BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); | ||
2585 | |||
2586 | /* decode the fastpath index and the cos index from the txq */ | ||
2587 | fp_index = TXQ_TO_FP(txq_index); | ||
2588 | txdata_index = TXQ_TO_COS(txq_index); | ||
2589 | |||
2590 | #ifdef BCM_CNIC | ||
2591 | /* | ||
2592 | * Override the above for the FCoE queue: | ||
2593 | * - FCoE fp entry is right after the ETH entries. | ||
2594 | * - FCoE L2 queue uses bp->txdata[0] only. | ||
2595 | */ | ||
2596 | if (unlikely(!NO_FCOE(bp) && (txq_index == | ||
2597 | bnx2x_fcoe_tx(bp, txq_index)))) { | ||
2598 | fp_index = FCOE_IDX; | ||
2599 | txdata_index = 0; | ||
2600 | } | ||
2601 | #endif | ||
2602 | |||
2603 | /* enable this debug print to view the transmission queue being used | ||
2604 | DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d", | ||
2605 | txq_index, fp_index, txdata_index); */ | ||
2606 | |||
2607 | /* locate the fastpath and the txdata */ | ||
2608 | fp = &bp->fp[fp_index]; | ||
2609 | txdata = &fp->txdata[txdata_index]; | ||
2610 | |||
2611 | /* enable this debug print to view the transmission details | ||
2612 | DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d" | ||
2613 | " tx_data ptr %p fp pointer %p", | ||
2614 | txdata->cid, fp_index, txdata_index, txdata, fp); */ | ||
2615 | |||
2616 | if (unlikely(bnx2x_tx_avail(bp, txdata) < | ||
2617 | (skb_shinfo(skb)->nr_frags + 3))) { | ||
2618 | fp->eth_q_stats.driver_xoff++; | ||
2619 | netif_tx_stop_queue(txq); | ||
2620 | BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); | ||
2621 | return NETDEV_TX_BUSY; | ||
2622 | } | ||
2623 | |||
2624 | DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x " | ||
2625 | "protocol(%x,%x) gso type %x xmit_type %x\n", | ||
2626 | txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, | ||
2627 | ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); | ||
2628 | |||
2629 | eth = (struct ethhdr *)skb->data; | ||
2630 | |||
2631 | /* set flag according to packet type (UNICAST_ADDRESS is default)*/ | ||
2632 | if (unlikely(is_multicast_ether_addr(eth->h_dest))) { | ||
2633 | if (is_broadcast_ether_addr(eth->h_dest)) | ||
2634 | mac_type = BROADCAST_ADDRESS; | ||
2635 | else | ||
2636 | mac_type = MULTICAST_ADDRESS; | ||
2637 | } | ||
2638 | |||
2639 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) | ||
2640 | /* First, check if we need to linearize the skb (due to FW | ||
2641 | restrictions). No need to check fragmentation if page size > 8K | ||
2642 | (there will be no violation of FW restrictions) */ | ||
2643 | if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { | ||
2644 | /* Statistics of linearization */ | ||
2645 | bp->lin_cnt++; | ||
2646 | if (skb_linearize(skb) != 0) { | ||
2647 | DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " | ||
2648 | "silently dropping this SKB\n"); | ||
2649 | dev_kfree_skb_any(skb); | ||
2650 | return NETDEV_TX_OK; | ||
2651 | } | ||
2652 | } | ||
2653 | #endif | ||
2654 | /* Map skb linear data for DMA */ | ||
2655 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | ||
2656 | skb_headlen(skb), DMA_TO_DEVICE); | ||
2657 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
2658 | DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " | ||
2659 | "silently dropping this SKB\n"); | ||
2660 | dev_kfree_skb_any(skb); | ||
2661 | return NETDEV_TX_OK; | ||
2662 | } | ||
2663 | /* | ||
2664 | Please read carefully. First we use one BD which we mark as start, | ||
2665 | then we have a parsing info BD (used for TSO or xsum), | ||
2666 | and only then we have the rest of the TSO BDs. | ||
2667 | (don't forget to mark the last one as last, | ||
2668 | and to unmap only AFTER you write to the BD ...) | ||
2669 | And above all, all pbd sizes are in words - NOT DWORDS! | ||
2670 | */ | ||
2671 | |||
2672 | /* get current pkt produced now - advance it just before sending packet | ||
2673 | * since mapping of pages may fail and cause packet to be dropped | ||
2674 | */ | ||
2675 | pkt_prod = txdata->tx_pkt_prod; | ||
2676 | bd_prod = TX_BD(txdata->tx_bd_prod); | ||
2677 | |||
2678 | /* get a tx_buf and first BD | ||
2679 | * tx_start_bd may be changed during SPLIT, | ||
2680 | * but first_bd will always stay first | ||
2681 | */ | ||
2682 | tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; | ||
2683 | tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; | ||
2684 | first_bd = tx_start_bd; | ||
2685 | |||
2686 | tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; | ||
2687 | SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, | ||
2688 | mac_type); | ||
2689 | |||
2690 | /* header nbd */ | ||
2691 | SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); | ||
2692 | |||
2693 | /* remember the first BD of the packet */ | ||
2694 | tx_buf->first_bd = txdata->tx_bd_prod; | ||
2695 | tx_buf->skb = skb; | ||
2696 | tx_buf->flags = 0; | ||
2697 | |||
2698 | DP(NETIF_MSG_TX_QUEUED, | ||
2699 | "sending pkt %u @%p next_idx %u bd %u @%p\n", | ||
2700 | pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); | ||
2701 | |||
2702 | if (vlan_tx_tag_present(skb)) { | ||
2703 | tx_start_bd->vlan_or_ethertype = | ||
2704 | cpu_to_le16(vlan_tx_tag_get(skb)); | ||
2705 | tx_start_bd->bd_flags.as_bitfield |= | ||
2706 | (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); | ||
2707 | } else | ||
2708 | tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); | ||
2709 | |||
2710 | /* turn on parsing and get a BD */ | ||
2711 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
2712 | |||
2713 | if (xmit_type & XMIT_CSUM) | ||
2714 | bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); | ||
2715 | |||
2716 | if (!CHIP_IS_E1x(bp)) { | ||
2717 | pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; | ||
2718 | memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); | ||
2719 | /* Set PBD in checksum offload case */ | ||
2720 | if (xmit_type & XMIT_CSUM) | ||
2721 | hlen = bnx2x_set_pbd_csum_e2(bp, skb, | ||
2722 | &pbd_e2_parsing_data, | ||
2723 | xmit_type); | ||
2724 | if (IS_MF_SI(bp)) { | ||
2725 | /* | ||
2726 | * fill in the MAC addresses in the PBD - for local | ||
2727 | * switching | ||
2728 | */ | ||
2729 | bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, | ||
2730 | &pbd_e2->src_mac_addr_mid, | ||
2731 | &pbd_e2->src_mac_addr_lo, | ||
2732 | eth->h_source); | ||
2733 | bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, | ||
2734 | &pbd_e2->dst_mac_addr_mid, | ||
2735 | &pbd_e2->dst_mac_addr_lo, | ||
2736 | eth->h_dest); | ||
2737 | } | ||
2738 | } else { | ||
2739 | pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; | ||
2740 | memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); | ||
2741 | /* Set PBD in checksum offload case */ | ||
2742 | if (xmit_type & XMIT_CSUM) | ||
2743 | hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); | ||
2744 | |||
2745 | } | ||
2746 | |||
2747 | /* Setup the data pointer of the first BD of the packet */ | ||
2748 | tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
2749 | tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
2750 | nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ | ||
2751 | tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); | ||
2752 | pkt_size = tx_start_bd->nbytes; | ||
2753 | |||
2754 | DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" | ||
2755 | " nbytes %d flags %x vlan %x\n", | ||
2756 | tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, | ||
2757 | le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), | ||
2758 | tx_start_bd->bd_flags.as_bitfield, | ||
2759 | le16_to_cpu(tx_start_bd->vlan_or_ethertype)); | ||
2760 | |||
2761 | if (xmit_type & XMIT_GSO) { | ||
2762 | |||
2763 | DP(NETIF_MSG_TX_QUEUED, | ||
2764 | "TSO packet len %d hlen %d total len %d tso size %d\n", | ||
2765 | skb->len, hlen, skb_headlen(skb), | ||
2766 | skb_shinfo(skb)->gso_size); | ||
2767 | |||
2768 | tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; | ||
2769 | |||
2770 | if (unlikely(skb_headlen(skb) > hlen)) | ||
2771 | bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, | ||
2772 | &tx_start_bd, hlen, | ||
2773 | bd_prod, ++nbd); | ||
2774 | if (!CHIP_IS_E1x(bp)) | ||
2775 | bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, | ||
2776 | xmit_type); | ||
2777 | else | ||
2778 | bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); | ||
2779 | } | ||
2780 | |||
2781 | /* Set the PBD's parsing_data field if not zero | ||
2782 | * (for the chips newer than 57711). | ||
2783 | */ | ||
2784 | if (pbd_e2_parsing_data) | ||
2785 | pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); | ||
2786 | |||
2787 | tx_data_bd = (struct eth_tx_bd *)tx_start_bd; | ||
2788 | |||
2789 | /* Handle fragmented skb */ | ||
2790 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
2791 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
2792 | |||
2793 | mapping = dma_map_page(&bp->pdev->dev, frag->page, | ||
2794 | frag->page_offset, frag->size, | ||
2795 | DMA_TO_DEVICE); | ||
2796 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | ||
2797 | |||
2798 | DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " | ||
2799 | "dropping packet...\n"); | ||
2800 | |||
2801 | /* we need to unmap all buffers already mapped | ||
2802 | * for this SKB; | ||
2803 | * first_bd->nbd needs to be properly updated | ||
2804 | * before the call to bnx2x_free_tx_pkt | ||
2805 | */ | ||
2806 | first_bd->nbd = cpu_to_le16(nbd); | ||
2807 | bnx2x_free_tx_pkt(bp, txdata, | ||
2808 | TX_BD(txdata->tx_pkt_prod)); | ||
2809 | return NETDEV_TX_OK; | ||
2810 | } | ||
2811 | |||
2812 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
2813 | tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; | ||
2814 | if (total_pkt_bd == NULL) | ||
2815 | total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; | ||
2816 | |||
2817 | tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | ||
2818 | tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | ||
2819 | tx_data_bd->nbytes = cpu_to_le16(frag->size); | ||
2820 | le16_add_cpu(&pkt_size, frag->size); | ||
2821 | nbd++; | ||
2822 | |||
2823 | DP(NETIF_MSG_TX_QUEUED, | ||
2824 | "frag %d bd @%p addr (%x:%x) nbytes %d\n", | ||
2825 | i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, | ||
2826 | le16_to_cpu(tx_data_bd->nbytes)); | ||
2827 | } | ||
2828 | |||
2829 | DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); | ||
2830 | |||
2831 | /* update with actual num BDs */ | ||
2832 | first_bd->nbd = cpu_to_le16(nbd); | ||
2833 | |||
2834 | bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); | ||
2835 | |||
2836 | /* now send a tx doorbell, counting the next BD | ||
2837 | * if the packet contains or ends with it | ||
2838 | */ | ||
2839 | if (TX_BD_POFF(bd_prod) < nbd) | ||
2840 | nbd++; | ||
2841 | |||
2842 | /* total_pkt_bytes should be set on the first data BD if | ||
2843 | * it's not an LSO packet and there is more than one | ||
2844 | * data BD. In this case pkt_size is limited by an MTU value. | ||
2845 | * However we prefer to set it for an LSO packet (while we don't | ||
2846 | * have to) in order to save some CPU cycles in the non-LSO | ||
2847 | * case, which we care much more about. | ||
2848 | */ | ||
2849 | if (total_pkt_bd != NULL) | ||
2850 | total_pkt_bd->total_pkt_bytes = pkt_size; | ||
2851 | |||
2852 | if (pbd_e1x) | ||
2853 | DP(NETIF_MSG_TX_QUEUED, | ||
2854 | "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" | ||
2855 | " tcp_flags %x xsum %x seq %u hlen %u\n", | ||
2856 | pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, | ||
2857 | pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, | ||
2858 | pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, | ||
2859 | le16_to_cpu(pbd_e1x->total_hlen_w)); | ||
2860 | if (pbd_e2) | ||
2861 | DP(NETIF_MSG_TX_QUEUED, | ||
2862 | "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", | ||
2863 | pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, | ||
2864 | pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, | ||
2865 | pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, | ||
2866 | pbd_e2->parsing_data); | ||
2867 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); | ||
2868 | |||
2869 | txdata->tx_pkt_prod++; | ||
2870 | /* | ||
2871 | * Make sure that the BD data is updated before updating the producer | ||
2872 | * since FW might read the BD right after the producer is updated. | ||
2873 | * This is only applicable for weak-ordered memory model archs such | ||
2874 | * as IA-64. The following barrier is also mandatory since the FW | ||
2875 | * assumes packets always have BDs. | ||
2876 | */ | ||
2877 | wmb(); | ||
2878 | |||
2879 | txdata->tx_db.data.prod += nbd; | ||
2880 | barrier(); | ||
2881 | |||
2882 | DOORBELL(bp, txdata->cid, txdata->tx_db.raw); | ||
2883 | |||
2884 | mmiowb(); | ||
2885 | |||
2886 | txdata->tx_bd_prod += nbd; | ||
2887 | |||
2888 | if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) { | ||
2889 | netif_tx_stop_queue(txq); | ||
2890 | |||
2891 | /* paired memory barrier is in bnx2x_tx_int(), we have to keep | ||
2892 | * ordering of set_bit() in netif_tx_stop_queue() and read of | ||
2893 | * fp->bd_tx_cons */ | ||
2894 | smp_mb(); | ||
2895 | |||
2896 | fp->eth_q_stats.driver_xoff++; | ||
2897 | if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3) | ||
2898 | netif_tx_wake_queue(txq); | ||
2899 | } | ||
2900 | txdata->tx_pkt++; | ||
2901 | |||
2902 | return NETDEV_TX_OK; | ||
2903 | } | ||
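/* BD accounting walk-through (annotation, not in the original source):
 * a non-LSO skb with two page fragments starts with nbd = 2 (start BD +
 * parse BD) and gains one BD per fragment, so first_bd->nbd = 4. For an
 * LSO skb whose linear part exceeds the headers, bnx2x_tx_split() adds one
 * more data BD, bumping nbd to 5. The doorbell may count one extra BD when
 * the packet wraps past a ring page boundary (TX_BD_POFF(bd_prod) < nbd).
 */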
2904 | |||
2905 | /** | ||
2906 | * bnx2x_setup_tc - routine to configure net_device for multi tc | ||
2907 | * | ||
2908 | * @dev: net device to configure | ||
2909 | * @num_tc: number of traffic classes to enable | ||
2910 | * | ||
2911 | * callback connected to the ndo_setup_tc function pointer | ||
2912 | */ | ||
2913 | int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) | ||
2914 | { | ||
2915 | int cos, prio, count, offset; | ||
2916 | struct bnx2x *bp = netdev_priv(dev); | ||
2917 | |||
2918 | /* setup tc must be called under rtnl lock */ | ||
2919 | ASSERT_RTNL(); | ||
2920 | |||
2921 | /* no traffic classes requested: reset the tc mapping and return */ | ||
2922 | if (!num_tc) { | ||
2923 | netdev_reset_tc(dev); | ||
2924 | return 0; | ||
2925 | } | ||
2926 | |||
2927 | /* requested to support too many traffic classes */ | ||
2928 | if (num_tc > bp->max_cos) { | ||
2929 | DP(NETIF_MSG_TX_ERR, "support for too many traffic classes" | ||
2930 | " requested: %d. max supported is %d", | ||
2931 | num_tc, bp->max_cos); | ||
2932 | return -EINVAL; | ||
2933 | } | ||
2934 | |||
2935 | /* declare the number of supported traffic classes */ | ||
2936 | if (netdev_set_num_tc(dev, num_tc)) { | ||
2937 | DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes", | ||
2938 | num_tc); | ||
2939 | return -EINVAL; | ||
2940 | } | ||
2941 | |||
2942 | /* configure priority to traffic class mapping */ | ||
2943 | for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { | ||
2944 | netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); | ||
2945 | DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", | ||
2946 | prio, bp->prio_to_cos[prio]); | ||
2947 | } | ||
2948 | |||
2949 | |||
2950 | /* Use this configuration to differentiate tc0 from other COSes. | ||
2951 | This can be used for ETS or PFC, and saves the effort of setting | ||
2952 | up a multi-class queue discipline or negotiating DCBX with a switch | ||
2953 | netdev_set_prio_tc_map(dev, 0, 0); | ||
2954 | DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0); | ||
2955 | for (prio = 1; prio < 16; prio++) { | ||
2956 | netdev_set_prio_tc_map(dev, prio, 1); | ||
2957 | DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1); | ||
2958 | } */ | ||
2959 | |||
2960 | /* configure traffic class to transmission queue mapping */ | ||
2961 | for (cos = 0; cos < bp->max_cos; cos++) { | ||
2962 | count = BNX2X_NUM_ETH_QUEUES(bp); | ||
2963 | offset = cos * MAX_TXQS_PER_COS; | ||
2964 | netdev_set_tc_queue(dev, cos, count, offset); | ||
2965 | DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d", | ||
2966 | cos, offset, count); | ||
2967 | } | ||
2968 | |||
2969 | return 0; | ||
2970 | } | ||
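/* Worked example (annotation, not in the original source): with 4 ETH
 * queues and max_cos = 3, tc 0 maps to 4 netdev queues at offset 0, tc 1
 * to 4 queues at offset MAX_TXQS_PER_COS and tc 2 to 4 queues at offset
 * 2 * MAX_TXQS_PER_COS; priorities are then steered to a tc through
 * bp->prio_to_cos[].
 */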
2971 | |||
2972 | /* called with rtnl_lock */ | ||
2973 | int bnx2x_change_mac_addr(struct net_device *dev, void *p) | ||
2974 | { | ||
2975 | struct sockaddr *addr = p; | ||
2976 | struct bnx2x *bp = netdev_priv(dev); | ||
2977 | int rc = 0; | ||
2978 | |||
2979 | if (!is_valid_ether_addr((u8 *)(addr->sa_data))) | ||
2980 | return -EINVAL; | ||
2981 | |||
2982 | if (netif_running(dev)) { | ||
2983 | rc = bnx2x_set_eth_mac(bp, false); | ||
2984 | if (rc) | ||
2985 | return rc; | ||
2986 | } | ||
2987 | |||
2988 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
2989 | |||
2990 | if (netif_running(dev)) | ||
2991 | rc = bnx2x_set_eth_mac(bp, true); | ||
2992 | |||
2993 | return rc; | ||
2994 | } | ||
2995 | |||
2996 | static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) | ||
2997 | { | ||
2998 | union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); | ||
2999 | struct bnx2x_fastpath *fp = &bp->fp[fp_index]; | ||
3000 | u8 cos; | ||
3001 | |||
3002 | /* Common */ | ||
3003 | #ifdef BCM_CNIC | ||
3004 | if (IS_FCOE_IDX(fp_index)) { | ||
3005 | memset(sb, 0, sizeof(union host_hc_status_block)); | ||
3006 | fp->status_blk_mapping = 0; | ||
3007 | |||
3008 | } else { | ||
3009 | #endif | ||
3010 | /* status blocks */ | ||
3011 | if (!CHIP_IS_E1x(bp)) | ||
3012 | BNX2X_PCI_FREE(sb->e2_sb, | ||
3013 | bnx2x_fp(bp, fp_index, | ||
3014 | status_blk_mapping), | ||
3015 | sizeof(struct host_hc_status_block_e2)); | ||
3016 | else | ||
3017 | BNX2X_PCI_FREE(sb->e1x_sb, | ||
3018 | bnx2x_fp(bp, fp_index, | ||
3019 | status_blk_mapping), | ||
3020 | sizeof(struct host_hc_status_block_e1x)); | ||
3021 | #ifdef BCM_CNIC | ||
3022 | } | ||
3023 | #endif | ||
3024 | /* Rx */ | ||
3025 | if (!skip_rx_queue(bp, fp_index)) { | ||
3026 | bnx2x_free_rx_bds(fp); | ||
3027 | |||
3028 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ | ||
3029 | BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); | ||
3030 | BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), | ||
3031 | bnx2x_fp(bp, fp_index, rx_desc_mapping), | ||
3032 | sizeof(struct eth_rx_bd) * NUM_RX_BD); | ||
3033 | |||
3034 | BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), | ||
3035 | bnx2x_fp(bp, fp_index, rx_comp_mapping), | ||
3036 | sizeof(struct eth_fast_path_rx_cqe) * | ||
3037 | NUM_RCQ_BD); | ||
3038 | |||
3039 | /* SGE ring */ | ||
3040 | BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); | ||
3041 | BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), | ||
3042 | bnx2x_fp(bp, fp_index, rx_sge_mapping), | ||
3043 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); | ||
3044 | } | ||
3045 | |||
3046 | /* Tx */ | ||
3047 | if (!skip_tx_queue(bp, fp_index)) { | ||
3048 | /* fastpath tx rings: tx_buf tx_desc */ | ||
3049 | for_each_cos_in_tx_queue(fp, cos) { | ||
3050 | struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; | ||
3051 | |||
3052 | DP(BNX2X_MSG_SP, | ||
3053 | "freeing tx memory of fp %d cos %d cid %d", | ||
3054 | fp_index, cos, txdata->cid); | ||
3055 | |||
3056 | BNX2X_FREE(txdata->tx_buf_ring); | ||
3057 | BNX2X_PCI_FREE(txdata->tx_desc_ring, | ||
3058 | txdata->tx_desc_mapping, | ||
3059 | sizeof(union eth_tx_bd_types) * NUM_TX_BD); | ||
3060 | } | ||
3061 | } | ||
3062 | /* end of fastpath */ | ||
3063 | } | ||
3064 | |||
3065 | void bnx2x_free_fp_mem(struct bnx2x *bp) | ||
3066 | { | ||
3067 | int i; | ||
3068 | for_each_queue(bp, i) | ||
3069 | bnx2x_free_fp_mem_at(bp, i); | ||
3070 | } | ||
3071 | |||
3072 | static inline void set_sb_shortcuts(struct bnx2x *bp, int index) | ||
3073 | { | ||
3074 | union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); | ||
3075 | if (!CHIP_IS_E1x(bp)) { | ||
3076 | bnx2x_fp(bp, index, sb_index_values) = | ||
3077 | (__le16 *)status_blk.e2_sb->sb.index_values; | ||
3078 | bnx2x_fp(bp, index, sb_running_index) = | ||
3079 | (__le16 *)status_blk.e2_sb->sb.running_index; | ||
3080 | } else { | ||
3081 | bnx2x_fp(bp, index, sb_index_values) = | ||
3082 | (__le16 *)status_blk.e1x_sb->sb.index_values; | ||
3083 | bnx2x_fp(bp, index, sb_running_index) = | ||
3084 | (__le16 *)status_blk.e1x_sb->sb.running_index; | ||
3085 | } | ||
3086 | } | ||
3087 | |||
3088 | static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) | ||
3089 | { | ||
3090 | union host_hc_status_block *sb; | ||
3091 | struct bnx2x_fastpath *fp = &bp->fp[index]; | ||
3092 | int ring_size = 0; | ||
3093 | u8 cos; | ||
3094 | int rx_ring_size = 0; | ||
3095 | |||
3096 | /* if rx_ring_size was specified - use it, otherwise derive a default */ | ||
3097 | if (!bp->rx_ring_size) { | ||
3098 | |||
3099 | rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); | ||
3100 | |||
3101 | /* allocate at least number of buffers required by FW */ | ||
3102 | rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : | ||
3103 | MIN_RX_SIZE_TPA, rx_ring_size); | ||
3104 | |||
3105 | bp->rx_ring_size = rx_ring_size; | ||
3106 | } else | ||
3107 | rx_ring_size = bp->rx_ring_size; | ||
3108 | |||
3109 | /* Common */ | ||
3110 | sb = &bnx2x_fp(bp, index, status_blk); | ||
3111 | #ifdef BCM_CNIC | ||
3112 | if (!IS_FCOE_IDX(index)) { | ||
3113 | #endif | ||
3114 | /* status blocks */ | ||
3115 | if (!CHIP_IS_E1x(bp)) | ||
3116 | BNX2X_PCI_ALLOC(sb->e2_sb, | ||
3117 | &bnx2x_fp(bp, index, status_blk_mapping), | ||
3118 | sizeof(struct host_hc_status_block_e2)); | ||
3119 | else | ||
3120 | BNX2X_PCI_ALLOC(sb->e1x_sb, | ||
3121 | &bnx2x_fp(bp, index, status_blk_mapping), | ||
3122 | sizeof(struct host_hc_status_block_e1x)); | ||
3123 | #ifdef BCM_CNIC | ||
3124 | } | ||
3125 | #endif | ||
3126 | |||
3127 | /* The FCoE queue uses the default SB and doesn't ACK the SB, so there | ||
3128 | * is no need to set shortcuts for it. | ||
3129 | */ | ||
3130 | if (!IS_FCOE_IDX(index)) | ||
3131 | set_sb_shortcuts(bp, index); | ||
3132 | |||
3133 | /* Tx */ | ||
3134 | if (!skip_tx_queue(bp, index)) { | ||
3135 | /* fastpath tx rings: tx_buf tx_desc */ | ||
3136 | for_each_cos_in_tx_queue(fp, cos) { | ||
3137 | struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; | ||
3138 | |||
3139 | DP(BNX2X_MSG_SP, | ||
3140 | "allocating tx memory of fp %d cos %d", | ||
3141 | index, cos); | ||
3142 | |||
3143 | BNX2X_ALLOC(txdata->tx_buf_ring, | ||
3144 | sizeof(struct sw_tx_bd) * NUM_TX_BD); | ||
3145 | BNX2X_PCI_ALLOC(txdata->tx_desc_ring, | ||
3146 | &txdata->tx_desc_mapping, | ||
3147 | sizeof(union eth_tx_bd_types) * NUM_TX_BD); | ||
3148 | } | ||
3149 | } | ||
3150 | |||
3151 | /* Rx */ | ||
3152 | if (!skip_rx_queue(bp, index)) { | ||
3153 | /* fastpath rx rings: rx_buf rx_desc rx_comp */ | ||
3154 | BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), | ||
3155 | sizeof(struct sw_rx_bd) * NUM_RX_BD); | ||
3156 | BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), | ||
3157 | &bnx2x_fp(bp, index, rx_desc_mapping), | ||
3158 | sizeof(struct eth_rx_bd) * NUM_RX_BD); | ||
3159 | |||
3160 | BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), | ||
3161 | &bnx2x_fp(bp, index, rx_comp_mapping), | ||
3162 | sizeof(struct eth_fast_path_rx_cqe) * | ||
3163 | NUM_RCQ_BD); | ||
3164 | |||
3165 | /* SGE ring */ | ||
3166 | BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), | ||
3167 | sizeof(struct sw_rx_page) * NUM_RX_SGE); | ||
3168 | BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), | ||
3169 | &bnx2x_fp(bp, index, rx_sge_mapping), | ||
3170 | BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); | ||
3171 | /* RX BD ring */ | ||
3172 | bnx2x_set_next_page_rx_bd(fp); | ||
3173 | |||
3174 | /* CQ ring */ | ||
3175 | bnx2x_set_next_page_rx_cq(fp); | ||
3176 | |||
3177 | /* BDs */ | ||
3178 | ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); | ||
3179 | if (ring_size < rx_ring_size) | ||
3180 | goto alloc_mem_err; | ||
3181 | } | ||
3182 | |||
3183 | return 0; | ||
3184 | |||
3185 | /* handle low-memory cases */ | ||
3186 | alloc_mem_err: | ||
3187 | BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n", | ||
3188 | index, ring_size); | ||
3189 | /* The FW will drop all packets if the queue is not big enough; | ||
3190 | * in that case we disable the queue. | ||
3191 | * The minimum size differs for OOO, TPA and non-TPA queues. | ||
3192 | */ | ||
3193 | if (ring_size < (fp->disable_tpa ? | ||
3194 | MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { | ||
3195 | /* release memory allocated for this queue */ | ||
3196 | bnx2x_free_fp_mem_at(bp, index); | ||
3197 | return -ENOMEM; | ||
3198 | } | ||
3199 | return 0; | ||
3200 | } | ||
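
The sizing policy above splits a global budget of Rx BDs evenly across the Rx queues, then clamps each ring up to the firmware minimum. A self-contained sketch of just that arithmetic; all three constants here are illustrative stand-ins, not the driver's real values:

#include <stdio.h>

#define MAX_RX_AVAIL		4096	/* assumed total Rx BD budget */
#define MIN_RX_SIZE_TPA		  72	/* assumed FW minimum, TPA enabled */
#define MIN_RX_SIZE_NONTPA	  16	/* assumed FW minimum, TPA disabled */

static int pick_rx_ring_size(int num_rx_queues, int disable_tpa)
{
	int size = MAX_RX_AVAIL / num_rx_queues;
	int fw_min = disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA;

	/* never hand the FW a ring smaller than it requires */
	return size > fw_min ? size : fw_min;
}

int main(void)
{
	printf("16 queues, TPA on : %d BDs\n", pick_rx_ring_size(16, 0));
	printf("128 queues, TPA on: %d BDs\n", pick_rx_ring_size(128, 0));
	return 0;
}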
3201 | |||
3202 | int bnx2x_alloc_fp_mem(struct bnx2x *bp) | ||
3203 | { | ||
3204 | int i; | ||
3205 | |||
3206 | /* | ||
3207 | * 1. Allocate FP for leading - fatal if error | ||
3208 | * 2. {CNIC} Allocate FCoE FP - fatal if error | ||
3209 | * 3. {CNIC} Allocate OOO + FWD - disable OOO if error | ||
3210 | * 4. Allocate RSS - fix number of queues if error | ||
3211 | */ | ||
3212 | |||
3213 | /* leading */ | ||
3214 | if (bnx2x_alloc_fp_mem_at(bp, 0)) | ||
3215 | return -ENOMEM; | ||
3216 | |||
3217 | #ifdef BCM_CNIC | ||
3218 | if (!NO_FCOE(bp)) | ||
3219 | /* FCoE */ | ||
3220 | if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) | ||
3221 | /* fail the load process instead of marking | ||
3222 | * NO_FCOE_FLAG | ||
3223 | */ | ||
3224 | return -ENOMEM; | ||
3225 | #endif | ||
3226 | |||
3227 | /* RSS */ | ||
3228 | for_each_nondefault_eth_queue(bp, i) | ||
3229 | if (bnx2x_alloc_fp_mem_at(bp, i)) | ||
3230 | break; | ||
3231 | |||
3232 | /* handle memory failures */ | ||
3233 | if (i != BNX2X_NUM_ETH_QUEUES(bp)) { | ||
3234 | int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; | ||
3235 | |||
3236 | WARN_ON(delta < 0); | ||
3237 | #ifdef BCM_CNIC | ||
3238 | /* | ||
3239 | * Move the non-eth FPs next to the last eth FP. | ||
3240 | * This must be done in this order: | ||
3241 | * FCOE_IDX < FWD_IDX < OOO_IDX | ||
3242 | */ | ||
3243 | |||
3244 | /* move the FCoE fp even if NO_FCOE_FLAG is set */ | ||
3245 | bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); | ||
3246 | #endif | ||
3247 | bp->num_queues -= delta; | ||
3248 | BNX2X_ERR("Adjusted num of queues from %d to %d\n", | ||
3249 | bp->num_queues + delta, bp->num_queues); | ||
3250 | } | ||
3251 | |||
3252 | return 0; | ||
3253 | } | ||
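
When an RSS queue fails to allocate mid-loop, the function above shrinks num_queues to what actually fit rather than failing the whole load. A toy standalone model of that fallback, where a fixed budget simulates running out of memory:

#include <stdio.h>

static int alloc_queue(int index, int *budget)
{
	(void)index;		/* index unused in this toy model */
	if (*budget <= 0)
		return -1;	/* simulated -ENOMEM */
	(*budget)--;
	return 0;
}

int main(void)
{
	int num_queues = 8, budget = 5, i;

	for (i = 1; i < num_queues; i++)	/* queue 0 assumed allocated */
		if (alloc_queue(i, &budget))
			break;

	if (i != num_queues) {
		int delta = num_queues - i;

		num_queues -= delta;
		printf("adjusted queues from %d to %d\n",
		       num_queues + delta, num_queues);
	}
	return 0;
}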
3254 | |||
3255 | void bnx2x_free_mem_bp(struct bnx2x *bp) | ||
3256 | { | ||
3257 | kfree(bp->fp); | ||
3258 | kfree(bp->msix_table); | ||
3259 | kfree(bp->ilt); | ||
3260 | } | ||
3261 | |||
3262 | int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) | ||
3263 | { | ||
3264 | struct bnx2x_fastpath *fp; | ||
3265 | struct msix_entry *tbl; | ||
3266 | struct bnx2x_ilt *ilt; | ||
3267 | int msix_table_size = 0; | ||
3268 | |||
3269 | /* | ||
3270 | * The biggest MSI-X table we might need is the maximum number of fast | ||
3271 | * path IGU SBs plus one for the default SB (for the PF). | ||
3272 | */ | ||
3273 | msix_table_size = bp->igu_sb_cnt + 1; | ||
3274 | |||
3275 | /* fp array: RSS plus CNIC related L2 queues */ | ||
3276 | fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) * | ||
3277 | sizeof(*fp), GFP_KERNEL); | ||
3278 | if (!fp) | ||
3279 | goto alloc_err; | ||
3280 | bp->fp = fp; | ||
3281 | |||
3282 | /* msix table */ | ||
3283 | tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL); | ||
3284 | if (!tbl) | ||
3285 | goto alloc_err; | ||
3286 | bp->msix_table = tbl; | ||
3287 | |||
3288 | /* ilt */ | ||
3289 | ilt = kzalloc(sizeof(*ilt), GFP_KERNEL); | ||
3290 | if (!ilt) | ||
3291 | goto alloc_err; | ||
3292 | bp->ilt = ilt; | ||
3293 | |||
3294 | return 0; | ||
3295 | alloc_err: | ||
3296 | bnx2x_free_mem_bp(bp); | ||
3297 | return -ENOMEM; | ||
3298 | } | ||
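
bnx2x_alloc_mem_bp leans on a classic kernel idiom: every allocation jumps to a single label on failure, and the matching free routine runs safely against a partially built object because kfree(NULL) is a no-op. A minimal user-space sketch of the same idiom, with hypothetical names (bag/make_bag/free_bag):

#include <stdlib.h>

struct bag {
	int *a;
	int *b;
	int *c;
};

static void free_bag(struct bag *bag)
{
	/* free(NULL) is a no-op, so a partially built bag needs no extra
	 * bookkeeping - exactly why one error path suffices above. */
	free(bag->a);
	free(bag->b);
	free(bag->c);
}

static int make_bag(struct bag *bag)
{
	*bag = (struct bag){ 0 };	/* so free_bag() is always safe */

	bag->a = calloc(16, sizeof(*bag->a));
	if (!bag->a)
		goto alloc_err;
	bag->b = calloc(16, sizeof(*bag->b));
	if (!bag->b)
		goto alloc_err;
	bag->c = calloc(16, sizeof(*bag->c));
	if (!bag->c)
		goto alloc_err;
	return 0;

alloc_err:
	free_bag(bag);
	return -1;	/* stands in for -ENOMEM */
}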
3300 | |||
3301 | int bnx2x_reload_if_running(struct net_device *dev) | ||
3302 | { | ||
3303 | struct bnx2x *bp = netdev_priv(dev); | ||
3304 | |||
3305 | if (unlikely(!netif_running(dev))) | ||
3306 | return 0; | ||
3307 | |||
3308 | bnx2x_nic_unload(bp, UNLOAD_NORMAL); | ||
3309 | return bnx2x_nic_load(bp, LOAD_NORMAL); | ||
3310 | } | ||
3311 | |||
3312 | int bnx2x_get_cur_phy_idx(struct bnx2x *bp) | ||
3313 | { | ||
3314 | u32 sel_phy_idx = 0; | ||
3315 | if (bp->link_params.num_phys <= 1) | ||
3316 | return INT_PHY; | ||
3317 | |||
3318 | if (bp->link_vars.link_up) { | ||
3319 | sel_phy_idx = EXT_PHY1; | ||
3320 | /* In case the link is SERDES, check whether EXT_PHY2 is the active one */ | ||
3321 | if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && | ||
3322 | (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) | ||
3323 | sel_phy_idx = EXT_PHY2; | ||
3324 | } else { | ||
3326 | switch (bnx2x_phy_selection(&bp->link_params)) { | ||
3327 | case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: | ||
3328 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: | ||
3329 | case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: | ||
3330 | sel_phy_idx = EXT_PHY1; | ||
3331 | break; | ||
3332 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: | ||
3333 | case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: | ||
3334 | sel_phy_idx = EXT_PHY2; | ||
3335 | break; | ||
3336 | } | ||
3337 | } | ||
3338 | |||
3339 | return sel_phy_idx; | ||
3340 | } | ||
3341 | | ||
3342 | int bnx2x_get_link_cfg_idx(struct bnx2x *bp) | ||
3343 | { | ||
3344 | u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); | ||
3345 | /* | ||
3346 | * The selected active PHY is always the one after swapping (in case | ||
3347 | * PHY swapping is enabled), so when swapping is enabled we need to | ||
3348 | * reverse the configuration. | ||
3349 | */ | ||
3350 | |||
3351 | if (bp->link_params.multi_phy_config & | ||
3352 | PORT_HW_CFG_PHY_SWAPPED_ENABLED) { | ||
3353 | if (sel_phy_idx == EXT_PHY1) | ||
3354 | sel_phy_idx = EXT_PHY2; | ||
3355 | else if (sel_phy_idx == EXT_PHY2) | ||
3356 | sel_phy_idx = EXT_PHY1; | ||
3357 | } | ||
3358 | return LINK_CONFIG_IDX(sel_phy_idx); | ||
3359 | } | ||
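
Both functions above reduce to one rule: the index of the currently active external PHY must be mirrored whenever hardware PHY swapping is enabled, before it can be used as a configuration index. A sketch of that mapping in isolation (the enum values are illustrative):

/* Stand-in indices; the driver's INT_PHY/EXT_PHY1/EXT_PHY2 play this role. */
enum phy_idx { INT_PHY, EXT_PHY1, EXT_PHY2 };

/* Mirror the external PHY index when the board wiring swaps the PHYs. */
static enum phy_idx apply_phy_swap(enum phy_idx sel, int swapped)
{
	if (!swapped)
		return sel;
	if (sel == EXT_PHY1)
		return EXT_PHY2;
	if (sel == EXT_PHY2)
		return EXT_PHY1;
	return sel;		/* the internal PHY is never swapped */
}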
3360 | |||
3361 | #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) | ||
3362 | int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) | ||
3363 | { | ||
3364 | struct bnx2x *bp = netdev_priv(dev); | ||
3365 | struct cnic_eth_dev *cp = &bp->cnic_eth_dev; | ||
3366 | |||
3367 | switch (type) { | ||
3368 | case NETDEV_FCOE_WWNN: | ||
3369 | *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, | ||
3370 | cp->fcoe_wwn_node_name_lo); | ||
3371 | break; | ||
3372 | case NETDEV_FCOE_WWPN: | ||
3373 | *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, | ||
3374 | cp->fcoe_wwn_port_name_lo); | ||
3375 | break; | ||
3376 | default: | ||
3377 | return -EINVAL; | ||
3378 | } | ||
3379 | |||
3380 | return 0; | ||
3381 | } | ||
3382 | #endif | ||
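
The CNIC interface hands the WWNs back as two 32-bit halves, and HILO_U64 splices them into the u64 the FCoE stack expects. Presumably it reduces to the usual shift-or composition, sketched here with a stand-in name:

#include <stdint.h>

/* hilo_u64 is a stand-in name for the driver's HILO_U64 macro. */
static inline uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

/* e.g. hilo_u64(0x20000000, 0x12345678) == 0x2000000012345678 */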
3383 | |||
3384 | /* called with rtnl_lock */ | ||
3385 | int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | ||
3386 | { | ||
3387 | struct bnx2x *bp = netdev_priv(dev); | ||
3388 | |||
3389 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
3390 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
3391 | return -EAGAIN; | ||
3392 | } | ||
3393 | |||
3394 | if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || | ||
3395 | ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) | ||
3396 | return -EINVAL; | ||
3397 | |||
3398 | /* This does not race with packet allocation | ||
3399 | * because the actual alloc size is | ||
3400 | * only updated as part of load | ||
3401 | */ | ||
3402 | dev->mtu = new_mtu; | ||
3403 | |||
3404 | return bnx2x_reload_if_running(dev); | ||
3405 | } | ||
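
The MTU check above is easy to misread: the upper bound compares the raw MTU, while the lower bound first adds the Ethernet header so that the resulting frame clears the minimum frame size. A sketch with stand-in constants (patterned on typical values - check bnx2x.h for the authoritative definitions):

#define SKETCH_ETH_HLEN			14	/* Ethernet header */
#define SKETCH_ETH_MIN_PACKET_SIZE	60	/* assumed min frame, no FCS */
#define SKETCH_ETH_MAX_JUMBO_PACKET_SIZE 9600	/* assumed jumbo limit */

/* new_mtu is an L3 payload size, so the lower bound must add the
 * Ethernet header before comparing against the minimum frame size. */
static int mtu_in_range(int new_mtu)
{
	return new_mtu <= SKETCH_ETH_MAX_JUMBO_PACKET_SIZE &&
	       new_mtu + SKETCH_ETH_HLEN >= SKETCH_ETH_MIN_PACKET_SIZE;
}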
3406 | |||
3407 | u32 bnx2x_fix_features(struct net_device *dev, u32 features) | ||
3408 | { | ||
3409 | struct bnx2x *bp = netdev_priv(dev); | ||
3410 | |||
3411 | /* TPA requires Rx CSUM offloading */ | ||
3412 | if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) | ||
3413 | features &= ~NETIF_F_LRO; | ||
3414 | |||
3415 | return features; | ||
3416 | } | ||
3417 | |||
3418 | int bnx2x_set_features(struct net_device *dev, u32 features) | ||
3419 | { | ||
3420 | struct bnx2x *bp = netdev_priv(dev); | ||
3421 | u32 flags = bp->flags; | ||
3422 | bool bnx2x_reload = false; | ||
3423 | |||
3424 | if (features & NETIF_F_LRO) | ||
3425 | flags |= TPA_ENABLE_FLAG; | ||
3426 | else | ||
3427 | flags &= ~TPA_ENABLE_FLAG; | ||
3428 | |||
3429 | if (features & NETIF_F_LOOPBACK) { | ||
3430 | if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { | ||
3431 | bp->link_params.loopback_mode = LOOPBACK_BMAC; | ||
3432 | bnx2x_reload = true; | ||
3433 | } | ||
3434 | } else { | ||
3435 | if (bp->link_params.loopback_mode != LOOPBACK_NONE) { | ||
3436 | bp->link_params.loopback_mode = LOOPBACK_NONE; | ||
3437 | bnx2x_reload = true; | ||
3438 | } | ||
3439 | } | ||
3440 | |||
3441 | if (flags ^ bp->flags) { | ||
3442 | bp->flags = flags; | ||
3443 | bnx2x_reload = true; | ||
3444 | } | ||
3445 | |||
3446 | if (bnx2x_reload) { | ||
3447 | if (bp->recovery_state == BNX2X_RECOVERY_DONE) | ||
3448 | return bnx2x_reload_if_running(dev); | ||
3449 | /* else: bnx2x_nic_load() will be called at end of recovery */ | ||
3450 | } | ||
3451 | |||
3452 | return 0; | ||
3453 | } | ||
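
The reload decision above hinges on `flags ^ bp->flags`: XOR yields exactly the changed bits, so any nonzero result means the device must be reloaded for the new flags to take effect. A sketch of that test in isolation (the flag bit is a stand-in):

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_TPA_ENABLE_FLAG	(1u << 0)	/* stand-in bit */

/* Fold the LRO request into the flag word and report whether anything
 * changed; any change is what forces the reload above. */
static bool apply_lro(uint32_t *flags, bool lro_on)
{
	uint32_t old = *flags;

	if (lro_on)
		*flags |= SKETCH_TPA_ENABLE_FLAG;
	else
		*flags &= ~SKETCH_TPA_ENABLE_FLAG;

	return (old ^ *flags) != 0;
}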
3454 | |||
3455 | void bnx2x_tx_timeout(struct net_device *dev) | ||
3456 | { | ||
3457 | struct bnx2x *bp = netdev_priv(dev); | ||
3458 | |||
3459 | #ifdef BNX2X_STOP_ON_ERROR | ||
3460 | if (!bp->panic) | ||
3461 | bnx2x_panic(); | ||
3462 | #endif | ||
3463 | |||
3464 | smp_mb__before_clear_bit(); | ||
3465 | set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state); | ||
3466 | smp_mb__after_clear_bit(); | ||
3467 | |||
3468 | /* This allows the netif to be shut down gracefully before resetting */ | ||
3469 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
3470 | } | ||
3471 | |||
3472 | int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) | ||
3473 | { | ||
3474 | struct net_device *dev = pci_get_drvdata(pdev); | ||
3475 | struct bnx2x *bp; | ||
3476 | |||
3477 | if (!dev) { | ||
3478 | dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); | ||
3479 | return -ENODEV; | ||
3480 | } | ||
3481 | bp = netdev_priv(dev); | ||
3482 | |||
3483 | rtnl_lock(); | ||
3484 | |||
3485 | pci_save_state(pdev); | ||
3486 | |||
3487 | if (!netif_running(dev)) { | ||
3488 | rtnl_unlock(); | ||
3489 | return 0; | ||
3490 | } | ||
3491 | |||
3492 | netif_device_detach(dev); | ||
3493 | |||
3494 | bnx2x_nic_unload(bp, UNLOAD_CLOSE); | ||
3495 | |||
3496 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); | ||
3497 | |||
3498 | rtnl_unlock(); | ||
3499 | |||
3500 | return 0; | ||
3501 | } | ||
3502 | |||
3503 | int bnx2x_resume(struct pci_dev *pdev) | ||
3504 | { | ||
3505 | struct net_device *dev = pci_get_drvdata(pdev); | ||
3506 | struct bnx2x *bp; | ||
3507 | int rc; | ||
3508 | |||
3509 | if (!dev) { | ||
3510 | dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); | ||
3511 | return -ENODEV; | ||
3512 | } | ||
3513 | bp = netdev_priv(dev); | ||
3514 | |||
3515 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | ||
3516 | printk(KERN_ERR "Handling parity error recovery. Try again later\n"); | ||
3517 | return -EAGAIN; | ||
3518 | } | ||
3519 | |||
3520 | rtnl_lock(); | ||
3521 | |||
3522 | pci_restore_state(pdev); | ||
3523 | |||
3524 | if (!netif_running(dev)) { | ||
3525 | rtnl_unlock(); | ||
3526 | return 0; | ||
3527 | } | ||
3528 | |||
3529 | bnx2x_set_power_state(bp, PCI_D0); | ||
3530 | netif_device_attach(dev); | ||
3531 | |||
3532 | /* Since the chip was reset, clear the FW sequence number */ | ||
3533 | bp->fw_seq = 0; | ||
3534 | rc = bnx2x_nic_load(bp, LOAD_OPEN); | ||
3535 | |||
3536 | rtnl_unlock(); | ||
3537 | |||
3538 | return rc; | ||
3539 | } | ||
3540 | | ||
3542 | void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, | ||
3543 | u32 cid) | ||
3544 | { | ||
3545 | /* ustorm cxt validation */ | ||
3546 | cxt->ustorm_ag_context.cdu_usage = | ||
3547 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), | ||
3548 | CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); | ||
3549 | /* xstorm cxt validation */ | ||
3550 | cxt->xstorm_ag_context.cdu_reserved = | ||
3551 | CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), | ||
3552 | CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); | ||
3553 | } | ||
3554 | |||
3555 | static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, | ||
3556 | u8 fw_sb_id, u8 sb_index, | ||
3557 | u8 ticks) | ||
3558 | { | ||
3560 | u32 addr = BAR_CSTRORM_INTMEM + | ||
3561 | CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); | ||
3562 | REG_WR8(bp, addr, ticks); | ||
3563 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", | ||
3564 | port, fw_sb_id, sb_index, ticks); | ||
3565 | } | ||
3566 | |||
3567 | static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, | ||
3568 | u16 fw_sb_id, u8 sb_index, | ||
3569 | u8 disable) | ||
3570 | { | ||
3571 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); | ||
3572 | u32 addr = BAR_CSTRORM_INTMEM + | ||
3573 | CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); | ||
3574 | u16 flags = REG_RD16(bp, addr); | ||
3575 | /* clear and set */ | ||
3576 | flags &= ~HC_INDEX_DATA_HC_ENABLED; | ||
3577 | flags |= enable_flag; | ||
3578 | REG_WR16(bp, addr, flags); | ||
3579 | DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", | ||
3580 | port, fw_sb_id, sb_index, disable); | ||
3581 | } | ||
3582 | |||
3583 | void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, | ||
3584 | u8 sb_index, u8 disable, u16 usec) | ||
3585 | { | ||
3586 | int port = BP_PORT(bp); | ||
3587 | u8 ticks = usec / BNX2X_BTR; | ||
3588 | |||
3589 | storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); | ||
3590 | |||
3591 | disable = disable ? 1 : (usec ? 0 : 1); | ||
3592 | storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); | ||
3593 | } | ||
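
The helper above packs two decisions: the timeout is programmed in BNX2X_BTR-microsecond ticks, and the `disable = disable ? 1 : (usec ? 0 : 1)` line means a zero timeout disables coalescing for that index even when the caller asked to keep it enabled. A sketch of both (the BTR granularity here is an assumption):

#define SKETCH_BNX2X_BTR	4	/* assumed tick granularity in usec */

struct hc_cfg {
	unsigned char ticks;
	unsigned char disabled;
};

/* Convert a microsecond timeout into ticks and derive the disable bit:
 * an explicit disable, or a timeout of zero, turns the index off. */
static struct hc_cfg hc_coalesce(unsigned short usec, int disable)
{
	struct hc_cfg cfg;

	cfg.ticks = (unsigned char)(usec / SKETCH_BNX2X_BTR);
	cfg.disabled = disable ? 1 : (usec ? 0 : 1);
	return cfg;
}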