path: root/drivers
author    Dmitry Kravkov <dmitry@broadcom.com>    2010-07-27 08:34:34 -0400
committer David S. Miller <davem@davemloft.net>   2010-07-27 23:35:41 -0400
commit    9f6c925889ad9204c7d1f5ca116d2e5fd6036c72 (patch)
tree      ab84e3b050729a1a92b54c1b6ed526cb97f9ad7b /drivers
parent    b0efbb996e8554ed8fe59e3f79e0bc83218083ab (diff)
bnx2x: Create bnx2x_cmn.* files
The newly created files contain no functional changes; they take over some functionality from bnx2x_main.c that is common to the PF driver and the upcoming VF driver.

Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/bnx2x/Makefile        2
-rw-r--r--  drivers/net/bnx2x/bnx2x.h         1
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c     2251
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h     652
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c    2637
5 files changed, 2943 insertions, 2600 deletions
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index 46c853b6cc53..ef4eebb38663 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_BNX2X) += bnx2x.o
 
-bnx2x-objs := bnx2x_main.o bnx2x_link.o
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 4afd29201a5c..260507032d39 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -45,6 +45,7 @@
 #endif
 
 #include <linux/mdio.h>
+#include <linux/pci.h>
 #include "bnx2x_reg.h"
 #include "bnx2x_fw_defs.h"
 #include "bnx2x_hsi.h"
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
new file mode 100644
index 000000000000..30d20c7fee0b
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,2251 @@
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18
19#include <linux/etherdevice.h>
20#include <linux/ip.h>
21#include <linux/ipv6.h>
22#include "bnx2x_cmn.h"
23
24#ifdef BCM_VLAN
25#include <linux/if_vlan.h>
26#endif
27
28static int bnx2x_poll(struct napi_struct *napi, int budget);
29
30/* free skb in the packet ring at pos idx
31 * return idx of last bd freed
32 */
33static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
34 u16 idx)
35{
36 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
37 struct eth_tx_start_bd *tx_start_bd;
38 struct eth_tx_bd *tx_data_bd;
39 struct sk_buff *skb = tx_buf->skb;
40 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
41 int nbd;
42
43 /* prefetch skb end pointer to speedup dev_kfree_skb() */
44 prefetch(&skb->end);
45
46 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
47 idx, tx_buf, skb);
48
49 /* unmap first bd */
50 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
51 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
52 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
53 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
54
55 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
56#ifdef BNX2X_STOP_ON_ERROR
57 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
58 BNX2X_ERR("BAD nbd!\n");
59 bnx2x_panic();
60 }
61#endif
62 new_cons = nbd + tx_buf->first_bd;
63
64 /* Get the next bd */
65 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
66
67 /* Skip a parse bd... */
68 --nbd;
69 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71 /* ...and the TSO split header bd since they have no mapping */
72 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
73 --nbd;
74 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
75 }
76
77 /* now free frags */
78 while (nbd > 0) {
79
80 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
81 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
82 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
83 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
84 if (--nbd)
85 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
86 }
87
88 /* release skb */
89 WARN_ON(!skb);
90 dev_kfree_skb(skb);
91 tx_buf->first_bd = 0;
92 tx_buf->skb = NULL;
93
94 return new_cons;
95}
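/*
 * [Editor's illustrative sketch -- not part of the commit above.]
 * A minimal userspace model of the BD accounting in bnx2x_free_tx_pkt():
 * only the start BD is unmapped with dma_unmap_single(); the parse BD and
 * (for TSO) the split BD carry no mapping of their own, and the remaining
 * "frag" BDs are unmapped with dma_unmap_page(). Helper names are
 * hypothetical; ring wrap-around (TX_BD/NEXT_TX_IDX) is ignored here.
 */
#include <stdbool.h>
#include <stdio.h>

static void model_count_unmaps(int desc_nbd, bool tso_split,
                               int *unmap_single, int *unmap_page)
{
        int nbd = desc_nbd - 1;   /* mirrors: nbd = le16_to_cpu(tx_start_bd->nbd) - 1 */

        *unmap_single = 1;        /* the start BD is always unmapped once */
        --nbd;                    /* skip the parse BD */
        if (tso_split)
                --nbd;            /* skip the TSO split BD as well */
        *unmap_page = nbd;        /* what remains are the frag BDs */
}

int main(void)
{
        int s, p;

        /* desc_nbd as written by bnx2x_start_xmit(): nr_frags + 2, plus one
         * more when the headers were split; here 4 frags with a split => 7 */
        model_count_unmaps(7, true, &s, &p);
        printf("unmap_single: %d, unmap_page: %d\n", s, p); /* 1 and 4 */
        return 0;
}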
96
97int bnx2x_tx_int(struct bnx2x_fastpath *fp)
98{
99 struct bnx2x *bp = fp->bp;
100 struct netdev_queue *txq;
101 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
102
103#ifdef BNX2X_STOP_ON_ERROR
104 if (unlikely(bp->panic))
105 return -1;
106#endif
107
108 txq = netdev_get_tx_queue(bp->dev, fp->index);
109 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
110 sw_cons = fp->tx_pkt_cons;
111
112 while (sw_cons != hw_cons) {
113 u16 pkt_cons;
114
115 pkt_cons = TX_BD(sw_cons);
116
117 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
118
119 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
120 hw_cons, sw_cons, pkt_cons);
121
122/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
123 rmb();
124 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
125 }
126*/
127 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
128 sw_cons++;
129 }
130
131 fp->tx_pkt_cons = sw_cons;
132 fp->tx_bd_cons = bd_cons;
133
134 /* Need to make the tx_bd_cons update visible to start_xmit()
135 * before checking for netif_tx_queue_stopped(). Without the
136 * memory barrier, there is a small possibility that
137 * start_xmit() will miss it and cause the queue to be stopped
138 * forever.
139 */
140 smp_mb();
141
142 /* TBD need a thresh? */
143 if (unlikely(netif_tx_queue_stopped(txq))) {
144 /* Taking tx_lock() is needed to prevent reenabling the queue
145 * while it's empty. This could have happened if rx_action() gets
146 * suspended in bnx2x_tx_int() after the condition before
147 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
148 *
149 * stops the queue->sees fresh tx_bd_cons->releases the queue->
150 * sends some packets consuming the whole queue again->
151 * stops the queue
152 */
153
154 __netif_tx_lock(txq, smp_processor_id());
155
156 if ((netif_tx_queue_stopped(txq)) &&
157 (bp->state == BNX2X_STATE_OPEN) &&
158 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
159 netif_tx_wake_queue(txq);
160
161 __netif_tx_unlock(txq);
162 }
163 return 0;
164}
165
166static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
167 u16 idx)
168{
169 u16 last_max = fp->last_max_sge;
170
171 if (SUB_S16(idx, last_max) > 0)
172 fp->last_max_sge = idx;
173}
174
175static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
176 struct eth_fast_path_rx_cqe *fp_cqe)
177{
178 struct bnx2x *bp = fp->bp;
179 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
180 le16_to_cpu(fp_cqe->len_on_bd)) >>
181 SGE_PAGE_SHIFT;
182 u16 last_max, last_elem, first_elem;
183 u16 delta = 0;
184 u16 i;
185
186 if (!sge_len)
187 return;
188
189 /* First mark all used pages */
190 for (i = 0; i < sge_len; i++)
191 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
192
193 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
194 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
195
196 /* Here we assume that the last SGE index is the biggest */
197 prefetch((void *)(fp->sge_mask));
198 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
199
200 last_max = RX_SGE(fp->last_max_sge);
201 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
202 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
203
204 /* If ring is not full */
205 if (last_elem + 1 != first_elem)
206 last_elem++;
207
208 /* Now update the prod */
209 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
210 if (likely(fp->sge_mask[i]))
211 break;
212
213 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
214 delta += RX_SGE_MASK_ELEM_SZ;
215 }
216
217 if (delta > 0) {
218 fp->rx_sge_prod += delta;
219 /* clear page-end entries */
220 bnx2x_clear_sge_mask_next_elems(fp);
221 }
222
223 DP(NETIF_MSG_RX_STATUS,
224 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
225 fp->last_max_sge, fp->rx_sge_prod);
226}
227
228static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
229 struct sk_buff *skb, u16 cons, u16 prod)
230{
231 struct bnx2x *bp = fp->bp;
232 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
233 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
234 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
235 dma_addr_t mapping;
236
237 /* move empty skb from pool to prod and map it */
238 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
239 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
240 bp->rx_buf_size, DMA_FROM_DEVICE);
241 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
242
243 /* move partial skb from cons to pool (don't unmap yet) */
244 fp->tpa_pool[queue] = *cons_rx_buf;
245
246 /* mark bin state as start - print error if current state != stop */
247 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
248 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
249
250 fp->tpa_state[queue] = BNX2X_TPA_START;
251
252 /* point prod_bd to new skb */
253 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
254 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
255
256#ifdef BNX2X_STOP_ON_ERROR
257 fp->tpa_queue_used |= (1 << queue);
258#ifdef _ASM_GENERIC_INT_L64_H
259 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
260#else
261 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
262#endif
263 fp->tpa_queue_used);
264#endif
265}
266
267static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
268 struct sk_buff *skb,
269 struct eth_fast_path_rx_cqe *fp_cqe,
270 u16 cqe_idx)
271{
272 struct sw_rx_page *rx_pg, old_rx_pg;
273 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
274 u32 i, frag_len, frag_size, pages;
275 int err;
276 int j;
277
278 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
279 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
280
281 /* This is needed in order to enable forwarding support */
282 if (frag_size)
283 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
284 max(frag_size, (u32)len_on_bd));
285
286#ifdef BNX2X_STOP_ON_ERROR
287 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
288 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
289 pages, cqe_idx);
290 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
291 fp_cqe->pkt_len, len_on_bd);
292 bnx2x_panic();
293 return -EINVAL;
294 }
295#endif
296
297 /* Run through the SGL and compose the fragmented skb */
298 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
299 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
300
301 /* FW gives the indices of the SGE as if the ring is an array
302 (meaning that "next" element will consume 2 indices) */
303 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
304 rx_pg = &fp->rx_page_ring[sge_idx];
305 old_rx_pg = *rx_pg;
306
307 /* If we fail to allocate a substitute page, we simply stop
308 where we are and drop the whole packet */
309 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
310 if (unlikely(err)) {
311 fp->eth_q_stats.rx_skb_alloc_failed++;
312 return err;
313 }
314
315 /* Unmap the page as we are going to pass it to the stack */
316 dma_unmap_page(&bp->pdev->dev,
317 dma_unmap_addr(&old_rx_pg, mapping),
318 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
319
320 /* Add one frag and update the appropriate fields in the skb */
321 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
322
323 skb->data_len += frag_len;
324 skb->truesize += frag_len;
325 skb->len += frag_len;
326
327 frag_size -= frag_len;
328 }
329
330 return 0;
331}
332
333static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
334 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
335 u16 cqe_idx)
336{
337 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
338 struct sk_buff *skb = rx_buf->skb;
339 /* alloc new skb */
340 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
341
342 /* Unmap skb in the pool anyway, as we are going to change
343 pool entry status to BNX2X_TPA_STOP even if new skb allocation
344 fails. */
345 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
346 bp->rx_buf_size, DMA_FROM_DEVICE);
347
348 if (likely(new_skb)) {
349 /* fix ip xsum and give it to the stack */
350 /* (no need to map the new skb) */
351#ifdef BCM_VLAN
352 int is_vlan_cqe =
353 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
354 PARSING_FLAGS_VLAN);
355 int is_not_hwaccel_vlan_cqe =
356 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
357#endif
358
359 prefetch(skb);
360 prefetch(((char *)(skb)) + 128);
361
362#ifdef BNX2X_STOP_ON_ERROR
363 if (pad + len > bp->rx_buf_size) {
364 BNX2X_ERR("skb_put is about to fail... "
365 "pad %d len %d rx_buf_size %d\n",
366 pad, len, bp->rx_buf_size);
367 bnx2x_panic();
368 return;
369 }
370#endif
371
372 skb_reserve(skb, pad);
373 skb_put(skb, len);
374
375 skb->protocol = eth_type_trans(skb, bp->dev);
376 skb->ip_summed = CHECKSUM_UNNECESSARY;
377
378 {
379 struct iphdr *iph;
380
381 iph = (struct iphdr *)skb->data;
382#ifdef BCM_VLAN
383 /* If there is no Rx VLAN offloading -
384 take VLAN tag into account */
385 if (unlikely(is_not_hwaccel_vlan_cqe))
386 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
387#endif
388 iph->check = 0;
389 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
390 }
391
392 if (!bnx2x_fill_frag_skb(bp, fp, skb,
393 &cqe->fast_path_cqe, cqe_idx)) {
394#ifdef BCM_VLAN
395 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
396 (!is_not_hwaccel_vlan_cqe))
397 vlan_gro_receive(&fp->napi, bp->vlgrp,
398 le16_to_cpu(cqe->fast_path_cqe.
399 vlan_tag), skb);
400 else
401#endif
402 napi_gro_receive(&fp->napi, skb);
403 } else {
404 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
405 " - dropping packet!\n");
406 dev_kfree_skb(skb);
407 }
408
409
410 /* put new skb in bin */
411 fp->tpa_pool[queue].skb = new_skb;
412
413 } else {
414 /* else drop the packet and keep the buffer in the bin */
415 DP(NETIF_MSG_RX_STATUS,
416 "Failed to allocate new skb - dropping packet!\n");
417 fp->eth_q_stats.rx_skb_alloc_failed++;
418 }
419
420 fp->tpa_state[queue] = BNX2X_TPA_STOP;
421}
422
423/* Set Toeplitz hash value in the skb using the value from the
424 * CQE (calculated by HW).
425 */
426static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
427 struct sk_buff *skb)
428{
429 /* Set Toeplitz hash from CQE */
430 if ((bp->dev->features & NETIF_F_RXHASH) &&
431 (cqe->fast_path_cqe.status_flags &
432 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
433 skb->rxhash =
434 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
435}
436
437int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
438{
439 struct bnx2x *bp = fp->bp;
440 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
441 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
442 int rx_pkt = 0;
443
444#ifdef BNX2X_STOP_ON_ERROR
445 if (unlikely(bp->panic))
446 return 0;
447#endif
448
449 /* CQ "next element" is of the size of the regular element,
450 that's why it's ok here */
451 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
452 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
453 hw_comp_cons++;
454
455 bd_cons = fp->rx_bd_cons;
456 bd_prod = fp->rx_bd_prod;
457 bd_prod_fw = bd_prod;
458 sw_comp_cons = fp->rx_comp_cons;
459 sw_comp_prod = fp->rx_comp_prod;
460
461 /* Memory barrier necessary as speculative reads of the rx
462 * buffer can be ahead of the index in the status block
463 */
464 rmb();
465
466 DP(NETIF_MSG_RX_STATUS,
467 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
468 fp->index, hw_comp_cons, sw_comp_cons);
469
470 while (sw_comp_cons != hw_comp_cons) {
471 struct sw_rx_bd *rx_buf = NULL;
472 struct sk_buff *skb;
473 union eth_rx_cqe *cqe;
474 u8 cqe_fp_flags;
475 u16 len, pad;
476
477 comp_ring_cons = RCQ_BD(sw_comp_cons);
478 bd_prod = RX_BD(bd_prod);
479 bd_cons = RX_BD(bd_cons);
480
481 /* Prefetch the page containing the BD descriptor
482 at producer's index. It will be needed when new skb is
483 allocated */
484 prefetch((void *)(PAGE_ALIGN((unsigned long)
485 (&fp->rx_desc_ring[bd_prod])) -
486 PAGE_SIZE + 1));
487
488 cqe = &fp->rx_comp_ring[comp_ring_cons];
489 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
490
491 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
492 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
493 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
494 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
495 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
496 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
497
498 /* is this a slowpath msg? */
499 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
500 bnx2x_sp_event(fp, cqe);
501 goto next_cqe;
502
503 /* this is an rx packet */
504 } else {
505 rx_buf = &fp->rx_buf_ring[bd_cons];
506 skb = rx_buf->skb;
507 prefetch(skb);
508 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
509 pad = cqe->fast_path_cqe.placement_offset;
510
511 /* If CQE is marked both TPA_START and TPA_END
512 it is a non-TPA CQE */
513 if ((!fp->disable_tpa) &&
514 (TPA_TYPE(cqe_fp_flags) !=
515 (TPA_TYPE_START | TPA_TYPE_END))) {
516 u16 queue = cqe->fast_path_cqe.queue_index;
517
518 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
519 DP(NETIF_MSG_RX_STATUS,
520 "calling tpa_start on queue %d\n",
521 queue);
522
523 bnx2x_tpa_start(fp, queue, skb,
524 bd_cons, bd_prod);
525
526 /* Set Toeplitz hash for an LRO skb */
527 bnx2x_set_skb_rxhash(bp, cqe, skb);
528
529 goto next_rx;
530 }
531
532 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
533 DP(NETIF_MSG_RX_STATUS,
534 "calling tpa_stop on queue %d\n",
535 queue);
536
537 if (!BNX2X_RX_SUM_FIX(cqe))
538 BNX2X_ERR("STOP on none TCP "
539 "data\n");
540
541 /* This is a size of the linear data
542 on this skb */
543 len = le16_to_cpu(cqe->fast_path_cqe.
544 len_on_bd);
545 bnx2x_tpa_stop(bp, fp, queue, pad,
546 len, cqe, comp_ring_cons);
547#ifdef BNX2X_STOP_ON_ERROR
548 if (bp->panic)
549 return 0;
550#endif
551
552 bnx2x_update_sge_prod(fp,
553 &cqe->fast_path_cqe);
554 goto next_cqe;
555 }
556 }
557
558 dma_sync_single_for_device(&bp->pdev->dev,
559 dma_unmap_addr(rx_buf, mapping),
560 pad + RX_COPY_THRESH,
561 DMA_FROM_DEVICE);
562 prefetch(((char *)(skb)) + 128);
563
564 /* is this an error packet? */
565 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
566 DP(NETIF_MSG_RX_ERR,
567 "ERROR flags %x rx packet %u\n",
568 cqe_fp_flags, sw_comp_cons);
569 fp->eth_q_stats.rx_err_discard_pkt++;
570 goto reuse_rx;
571 }
572
573 /* Since we don't have a jumbo ring
574 * copy small packets if mtu > 1500
575 */
576 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
577 (len <= RX_COPY_THRESH)) {
578 struct sk_buff *new_skb;
579
580 new_skb = netdev_alloc_skb(bp->dev,
581 len + pad);
582 if (new_skb == NULL) {
583 DP(NETIF_MSG_RX_ERR,
584 "ERROR packet dropped "
585 "because of alloc failure\n");
586 fp->eth_q_stats.rx_skb_alloc_failed++;
587 goto reuse_rx;
588 }
589
590 /* aligned copy */
591 skb_copy_from_linear_data_offset(skb, pad,
592 new_skb->data + pad, len);
593 skb_reserve(new_skb, pad);
594 skb_put(new_skb, len);
595
596 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
597
598 skb = new_skb;
599
600 } else
601 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
602 dma_unmap_single(&bp->pdev->dev,
603 dma_unmap_addr(rx_buf, mapping),
604 bp->rx_buf_size,
605 DMA_FROM_DEVICE);
606 skb_reserve(skb, pad);
607 skb_put(skb, len);
608
609 } else {
610 DP(NETIF_MSG_RX_ERR,
611 "ERROR packet dropped because "
612 "of alloc failure\n");
613 fp->eth_q_stats.rx_skb_alloc_failed++;
614reuse_rx:
615 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
616 goto next_rx;
617 }
618
619 skb->protocol = eth_type_trans(skb, bp->dev);
620
621 /* Set Toeplitz hash for a non-LRO skb */
622 bnx2x_set_skb_rxhash(bp, cqe, skb);
623
624 skb->ip_summed = CHECKSUM_NONE;
625 if (bp->rx_csum) {
626 if (likely(BNX2X_RX_CSUM_OK(cqe)))
627 skb->ip_summed = CHECKSUM_UNNECESSARY;
628 else
629 fp->eth_q_stats.hw_csum_err++;
630 }
631 }
632
633 skb_record_rx_queue(skb, fp->index);
634
635#ifdef BCM_VLAN
636 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
637 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
638 PARSING_FLAGS_VLAN))
639 vlan_gro_receive(&fp->napi, bp->vlgrp,
640 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
641 else
642#endif
643 napi_gro_receive(&fp->napi, skb);
644
645
646next_rx:
647 rx_buf->skb = NULL;
648
649 bd_cons = NEXT_RX_IDX(bd_cons);
650 bd_prod = NEXT_RX_IDX(bd_prod);
651 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
652 rx_pkt++;
653next_cqe:
654 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
655 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
656
657 if (rx_pkt == budget)
658 break;
659 } /* while */
660
661 fp->rx_bd_cons = bd_cons;
662 fp->rx_bd_prod = bd_prod_fw;
663 fp->rx_comp_cons = sw_comp_cons;
664 fp->rx_comp_prod = sw_comp_prod;
665
666 /* Update producers */
667 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
668 fp->rx_sge_prod);
669
670 fp->rx_pkt += rx_pkt;
671 fp->rx_calls++;
672
673 return rx_pkt;
674}
675
676static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
677{
678 struct bnx2x_fastpath *fp = fp_cookie;
679 struct bnx2x *bp = fp->bp;
680
681 /* Return here if interrupt is disabled */
682 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
683 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
684 return IRQ_HANDLED;
685 }
686
687 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
688 fp->index, fp->sb_id);
689 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
690
691#ifdef BNX2X_STOP_ON_ERROR
692 if (unlikely(bp->panic))
693 return IRQ_HANDLED;
694#endif
695
696 /* Handle Rx and Tx according to MSI-X vector */
697 prefetch(fp->rx_cons_sb);
698 prefetch(fp->tx_cons_sb);
699 prefetch(&fp->status_blk->u_status_block.status_block_index);
700 prefetch(&fp->status_blk->c_status_block.status_block_index);
701 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
702
703 return IRQ_HANDLED;
704}
705
706
707/* HW Lock for shared dual port PHYs */
708void bnx2x_acquire_phy_lock(struct bnx2x *bp)
709{
710 mutex_lock(&bp->port.phy_mutex);
711
712 if (bp->port.need_hw_lock)
713 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
714}
715
716void bnx2x_release_phy_lock(struct bnx2x *bp)
717{
718 if (bp->port.need_hw_lock)
719 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
720
721 mutex_unlock(&bp->port.phy_mutex);
722}
723
724void bnx2x_link_report(struct bnx2x *bp)
725{
726 if (bp->flags & MF_FUNC_DIS) {
727 netif_carrier_off(bp->dev);
728 netdev_err(bp->dev, "NIC Link is Down\n");
729 return;
730 }
731
732 if (bp->link_vars.link_up) {
733 u16 line_speed;
734
735 if (bp->state == BNX2X_STATE_OPEN)
736 netif_carrier_on(bp->dev);
737 netdev_info(bp->dev, "NIC Link is Up, ");
738
739 line_speed = bp->link_vars.line_speed;
740 if (IS_E1HMF(bp)) {
741 u16 vn_max_rate;
742
743 vn_max_rate =
744 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
745 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
746 if (vn_max_rate < line_speed)
747 line_speed = vn_max_rate;
748 }
749 pr_cont("%d Mbps ", line_speed);
750
751 if (bp->link_vars.duplex == DUPLEX_FULL)
752 pr_cont("full duplex");
753 else
754 pr_cont("half duplex");
755
756 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
757 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
758 pr_cont(", receive ");
759 if (bp->link_vars.flow_ctrl &
760 BNX2X_FLOW_CTRL_TX)
761 pr_cont("& transmit ");
762 } else {
763 pr_cont(", transmit ");
764 }
765 pr_cont("flow control ON");
766 }
767 pr_cont("\n");
768
769 } else { /* link_down */
770 netif_carrier_off(bp->dev);
771 netdev_err(bp->dev, "NIC Link is Down\n");
772 }
773}
774
775void bnx2x_init_rx_rings(struct bnx2x *bp)
776{
777 int func = BP_FUNC(bp);
778 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
779 ETH_MAX_AGGREGATION_QUEUES_E1H;
780 u16 ring_prod, cqe_ring_prod;
781 int i, j;
782
783 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
784 DP(NETIF_MSG_IFUP,
785 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
786
787 if (bp->flags & TPA_ENABLE_FLAG) {
788
789 for_each_queue(bp, j) {
790 struct bnx2x_fastpath *fp = &bp->fp[j];
791
792 for (i = 0; i < max_agg_queues; i++) {
793 fp->tpa_pool[i].skb =
794 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
795 if (!fp->tpa_pool[i].skb) {
796 BNX2X_ERR("Failed to allocate TPA "
797 "skb pool for queue[%d] - "
798 "disabling TPA on this "
799 "queue!\n", j);
800 bnx2x_free_tpa_pool(bp, fp, i);
801 fp->disable_tpa = 1;
802 break;
803 }
804 dma_unmap_addr_set((struct sw_rx_bd *)
805 &bp->fp->tpa_pool[i],
806 mapping, 0);
807 fp->tpa_state[i] = BNX2X_TPA_STOP;
808 }
809 }
810 }
811
812 for_each_queue(bp, j) {
813 struct bnx2x_fastpath *fp = &bp->fp[j];
814
815 fp->rx_bd_cons = 0;
816 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
817 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
818
819 /* "next page" elements initialization */
820 /* SGE ring */
821 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
822 struct eth_rx_sge *sge;
823
824 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
825 sge->addr_hi =
826 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
827 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
828 sge->addr_lo =
829 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
830 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
831 }
832
833 bnx2x_init_sge_ring_bit_mask(fp);
834
835 /* RX BD ring */
836 for (i = 1; i <= NUM_RX_RINGS; i++) {
837 struct eth_rx_bd *rx_bd;
838
839 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
840 rx_bd->addr_hi =
841 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
842 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
843 rx_bd->addr_lo =
844 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
845 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
846 }
847
848 /* CQ ring */
849 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
850 struct eth_rx_cqe_next_page *nextpg;
851
852 nextpg = (struct eth_rx_cqe_next_page *)
853 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
854 nextpg->addr_hi =
855 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
856 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
857 nextpg->addr_lo =
858 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
859 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
860 }
861
862 /* Allocate SGEs and initialize the ring elements */
863 for (i = 0, ring_prod = 0;
864 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
865
866 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
867 BNX2X_ERR("was only able to allocate "
868 "%d rx sges\n", i);
869 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
870 /* Cleanup already allocated elements */
871 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
872 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
873 fp->disable_tpa = 1;
874 ring_prod = 0;
875 break;
876 }
877 ring_prod = NEXT_SGE_IDX(ring_prod);
878 }
879 fp->rx_sge_prod = ring_prod;
880
881 /* Allocate BDs and initialize BD ring */
882 fp->rx_comp_cons = 0;
883 cqe_ring_prod = ring_prod = 0;
884 for (i = 0; i < bp->rx_ring_size; i++) {
885 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
886 BNX2X_ERR("was only able to allocate "
887 "%d rx skbs on queue[%d]\n", i, j);
888 fp->eth_q_stats.rx_skb_alloc_failed++;
889 break;
890 }
891 ring_prod = NEXT_RX_IDX(ring_prod);
892 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
893 WARN_ON(ring_prod <= i);
894 }
895
896 fp->rx_bd_prod = ring_prod;
897 /* must not have more available CQEs than BDs */
898 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
899 cqe_ring_prod);
900 fp->rx_pkt = fp->rx_calls = 0;
901
902 /* Warning!
903 * this will generate an interrupt (to the TSTORM)
904 * must only be done after chip is initialized
905 */
906 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
907 fp->rx_sge_prod);
908 if (j != 0)
909 continue;
910
911 REG_WR(bp, BAR_USTRORM_INTMEM +
912 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
913 U64_LO(fp->rx_comp_mapping));
914 REG_WR(bp, BAR_USTRORM_INTMEM +
915 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
916 U64_HI(fp->rx_comp_mapping));
917 }
918}
919static void bnx2x_free_tx_skbs(struct bnx2x *bp)
920{
921 int i;
922
923 for_each_queue(bp, i) {
924 struct bnx2x_fastpath *fp = &bp->fp[i];
925
926 u16 bd_cons = fp->tx_bd_cons;
927 u16 sw_prod = fp->tx_pkt_prod;
928 u16 sw_cons = fp->tx_pkt_cons;
929
930 while (sw_cons != sw_prod) {
931 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
932 sw_cons++;
933 }
934 }
935}
936
937static void bnx2x_free_rx_skbs(struct bnx2x *bp)
938{
939 int i, j;
940
941 for_each_queue(bp, j) {
942 struct bnx2x_fastpath *fp = &bp->fp[j];
943
944 for (i = 0; i < NUM_RX_BD; i++) {
945 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
946 struct sk_buff *skb = rx_buf->skb;
947
948 if (skb == NULL)
949 continue;
950
951 dma_unmap_single(&bp->pdev->dev,
952 dma_unmap_addr(rx_buf, mapping),
953 bp->rx_buf_size, DMA_FROM_DEVICE);
954
955 rx_buf->skb = NULL;
956 dev_kfree_skb(skb);
957 }
958 if (!fp->disable_tpa)
959 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
960 ETH_MAX_AGGREGATION_QUEUES_E1 :
961 ETH_MAX_AGGREGATION_QUEUES_E1H);
962 }
963}
964
965void bnx2x_free_skbs(struct bnx2x *bp)
966{
967 bnx2x_free_tx_skbs(bp);
968 bnx2x_free_rx_skbs(bp);
969}
970
971static void bnx2x_free_msix_irqs(struct bnx2x *bp)
972{
973 int i, offset = 1;
974
975 free_irq(bp->msix_table[0].vector, bp->dev);
976 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
977 bp->msix_table[0].vector);
978
979#ifdef BCM_CNIC
980 offset++;
981#endif
982 for_each_queue(bp, i) {
983 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
984 "state %x\n", i, bp->msix_table[i + offset].vector,
985 bnx2x_fp(bp, i, state));
986
987 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
988 }
989}
990
991void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
992{
993 if (bp->flags & USING_MSIX_FLAG) {
994 if (!disable_only)
995 bnx2x_free_msix_irqs(bp);
996 pci_disable_msix(bp->pdev);
997 bp->flags &= ~USING_MSIX_FLAG;
998
999 } else if (bp->flags & USING_MSI_FLAG) {
1000 if (!disable_only)
1001 free_irq(bp->pdev->irq, bp->dev);
1002 pci_disable_msi(bp->pdev);
1003 bp->flags &= ~USING_MSI_FLAG;
1004
1005 } else if (!disable_only)
1006 free_irq(bp->pdev->irq, bp->dev);
1007}
1008
1009static int bnx2x_enable_msix(struct bnx2x *bp)
1010{
1011 int i, rc, offset = 1;
1012 int igu_vec = 0;
1013
1014 bp->msix_table[0].entry = igu_vec;
1015 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
1016
1017#ifdef BCM_CNIC
1018 igu_vec = BP_L_ID(bp) + offset;
1019 bp->msix_table[1].entry = igu_vec;
1020 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
1021 offset++;
1022#endif
1023 for_each_queue(bp, i) {
1024 igu_vec = BP_L_ID(bp) + offset + i;
1025 bp->msix_table[i + offset].entry = igu_vec;
1026 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1027 "(fastpath #%u)\n", i + offset, igu_vec, i);
1028 }
1029
1030 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
1031 BNX2X_NUM_QUEUES(bp) + offset);
1032
1033 /*
1034 * reconfigure number of tx/rx queues according to available
1035 * MSI-X vectors
1036 */
1037 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1038 /* vectors available for FP */
1039 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
1040
1041 DP(NETIF_MSG_IFUP,
1042 "Trying to use less MSI-X vectors: %d\n", rc);
1043
1044 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1045
1046 if (rc) {
1047 DP(NETIF_MSG_IFUP,
1048 "MSI-X is not attainable rc %d\n", rc);
1049 return rc;
1050 }
1051
1052 bp->num_queues = min(bp->num_queues, fp_vec);
1053
1054 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1055 bp->num_queues);
1056 } else if (rc) {
1057 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1058 return rc;
1059 }
1060
1061 bp->flags |= USING_MSIX_FLAG;
1062
1063 return 0;
1064}
1065
1066static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1067{
1068 int i, rc, offset = 1;
1069
1070 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1071 bp->dev->name, bp->dev);
1072 if (rc) {
1073 BNX2X_ERR("request sp irq failed\n");
1074 return -EBUSY;
1075 }
1076
1077#ifdef BCM_CNIC
1078 offset++;
1079#endif
1080 for_each_queue(bp, i) {
1081 struct bnx2x_fastpath *fp = &bp->fp[i];
1082 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1083 bp->dev->name, i);
1084
1085 rc = request_irq(bp->msix_table[i + offset].vector,
1086 bnx2x_msix_fp_int, 0, fp->name, fp);
1087 if (rc) {
1088 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1089 bnx2x_free_msix_irqs(bp);
1090 return -EBUSY;
1091 }
1092
1093 fp->state = BNX2X_FP_STATE_IRQ;
1094 }
1095
1096 i = BNX2X_NUM_QUEUES(bp);
1097 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1098 " ... fp[%d] %d\n",
1099 bp->msix_table[0].vector,
1100 0, bp->msix_table[offset].vector,
1101 i - 1, bp->msix_table[offset + i - 1].vector);
1102
1103 return 0;
1104}
1105
1106static int bnx2x_enable_msi(struct bnx2x *bp)
1107{
1108 int rc;
1109
1110 rc = pci_enable_msi(bp->pdev);
1111 if (rc) {
1112 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1113 return -1;
1114 }
1115 bp->flags |= USING_MSI_FLAG;
1116
1117 return 0;
1118}
1119
1120static int bnx2x_req_irq(struct bnx2x *bp)
1121{
1122 unsigned long flags;
1123 int rc;
1124
1125 if (bp->flags & USING_MSI_FLAG)
1126 flags = 0;
1127 else
1128 flags = IRQF_SHARED;
1129
1130 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1131 bp->dev->name, bp->dev);
1132 if (!rc)
1133 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1134
1135 return rc;
1136}
1137
1138static void bnx2x_napi_enable(struct bnx2x *bp)
1139{
1140 int i;
1141
1142 for_each_queue(bp, i)
1143 napi_enable(&bnx2x_fp(bp, i, napi));
1144}
1145
1146static void bnx2x_napi_disable(struct bnx2x *bp)
1147{
1148 int i;
1149
1150 for_each_queue(bp, i)
1151 napi_disable(&bnx2x_fp(bp, i, napi));
1152}
1153
1154void bnx2x_netif_start(struct bnx2x *bp)
1155{
1156 int intr_sem;
1157
1158 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1159 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1160
1161 if (intr_sem) {
1162 if (netif_running(bp->dev)) {
1163 bnx2x_napi_enable(bp);
1164 bnx2x_int_enable(bp);
1165 if (bp->state == BNX2X_STATE_OPEN)
1166 netif_tx_wake_all_queues(bp->dev);
1167 }
1168 }
1169}
1170
1171void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1172{
1173 bnx2x_int_disable_sync(bp, disable_hw);
1174 bnx2x_napi_disable(bp);
1175 netif_tx_disable(bp->dev);
1176}
1177static int bnx2x_set_num_queues(struct bnx2x *bp)
1178{
1179 int rc = 0;
1180
1181 switch (bp->int_mode) {
1182 case INT_MODE_INTx:
1183 case INT_MODE_MSI:
1184 bp->num_queues = 1;
1185 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1186 break;
1187 default:
1188 /* Set number of queues according to bp->multi_mode value */
1189 bnx2x_set_num_queues_msix(bp);
1190
1191 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
1192 bp->num_queues);
1193
1194 /* if we can't use MSI-X we only need one fp,
1195 * so try to enable MSI-X with the requested number of fp's
1196 * and fallback to MSI or legacy INTx with one fp
1197 */
1198 rc = bnx2x_enable_msix(bp);
1199 if (rc)
1200 /* failed to enable MSI-X */
1201 bp->num_queues = 1;
1202 break;
1203 }
1204 bp->dev->real_num_tx_queues = bp->num_queues;
1205 return rc;
1206}
1207
1208/* must be called with rtnl_lock */
1209int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1210{
1211 u32 load_code;
1212 int i, rc;
1213
1214#ifdef BNX2X_STOP_ON_ERROR
1215 if (unlikely(bp->panic))
1216 return -EPERM;
1217#endif
1218
1219 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1220
1221 rc = bnx2x_set_num_queues(bp);
1222
1223 if (bnx2x_alloc_mem(bp)) {
1224 bnx2x_free_irq(bp, true);
1225 return -ENOMEM;
1226 }
1227
1228 for_each_queue(bp, i)
1229 bnx2x_fp(bp, i, disable_tpa) =
1230 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1231
1232 for_each_queue(bp, i)
1233 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
1234 bnx2x_poll, 128);
1235
1236 bnx2x_napi_enable(bp);
1237
1238 if (bp->flags & USING_MSIX_FLAG) {
1239 rc = bnx2x_req_msix_irqs(bp);
1240 if (rc) {
1241 bnx2x_free_irq(bp, true);
1242 goto load_error1;
1243 }
1244 } else {
1245 /* Fall back to INTx if we failed to enable MSI-X due to lack of
1246 memory (in bnx2x_set_num_queues()) */
1247 if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
1248 bnx2x_enable_msi(bp);
1249 bnx2x_ack_int(bp);
1250 rc = bnx2x_req_irq(bp);
1251 if (rc) {
1252 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1253 bnx2x_free_irq(bp, true);
1254 goto load_error1;
1255 }
1256 if (bp->flags & USING_MSI_FLAG) {
1257 bp->dev->irq = bp->pdev->irq;
1258 netdev_info(bp->dev, "using MSI IRQ %d\n",
1259 bp->pdev->irq);
1260 }
1261 }
1262
1263 /* Send LOAD_REQUEST command to MCP
1264 Returns the type of LOAD command:
1265 if it is the first port to be initialized
1266 common blocks should be initialized, otherwise - not
1267 */
1268 if (!BP_NOMCP(bp)) {
1269 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
1270 if (!load_code) {
1271 BNX2X_ERR("MCP response failure, aborting\n");
1272 rc = -EBUSY;
1273 goto load_error2;
1274 }
1275 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1276 rc = -EBUSY; /* other port in diagnostic mode */
1277 goto load_error2;
1278 }
1279
1280 } else {
1281 int port = BP_PORT(bp);
1282
1283 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
1284 load_count[0], load_count[1], load_count[2]);
1285 load_count[0]++;
1286 load_count[1 + port]++;
1287 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
1288 load_count[0], load_count[1], load_count[2]);
1289 if (load_count[0] == 1)
1290 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1291 else if (load_count[1 + port] == 1)
1292 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1293 else
1294 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1295 }
1296
1297 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1298 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1299 bp->port.pmf = 1;
1300 else
1301 bp->port.pmf = 0;
1302 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1303
1304 /* Initialize HW */
1305 rc = bnx2x_init_hw(bp, load_code);
1306 if (rc) {
1307 BNX2X_ERR("HW init failed, aborting\n");
1308 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
1309 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
1310 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
1311 goto load_error2;
1312 }
1313
1314 /* Setup NIC internals and enable interrupts */
1315 bnx2x_nic_init(bp, load_code);
1316
1317 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
1318 (bp->common.shmem2_base))
1319 SHMEM2_WR(bp, dcc_support,
1320 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1321 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1322
1323 /* Send LOAD_DONE command to MCP */
1324 if (!BP_NOMCP(bp)) {
1325 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
1326 if (!load_code) {
1327 BNX2X_ERR("MCP response failure, aborting\n");
1328 rc = -EBUSY;
1329 goto load_error3;
1330 }
1331 }
1332
1333 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1334
1335 rc = bnx2x_setup_leading(bp);
1336 if (rc) {
1337 BNX2X_ERR("Setup leading failed!\n");
1338#ifndef BNX2X_STOP_ON_ERROR
1339 goto load_error3;
1340#else
1341 bp->panic = 1;
1342 return -EBUSY;
1343#endif
1344 }
1345
1346 if (CHIP_IS_E1H(bp))
1347 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1348 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1349 bp->flags |= MF_FUNC_DIS;
1350 }
1351
1352 if (bp->state == BNX2X_STATE_OPEN) {
1353#ifdef BCM_CNIC
1354 /* Enable Timer scan */
1355 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1356#endif
1357 for_each_nondefault_queue(bp, i) {
1358 rc = bnx2x_setup_multi(bp, i);
1359 if (rc)
1360#ifdef BCM_CNIC
1361 goto load_error4;
1362#else
1363 goto load_error3;
1364#endif
1365 }
1366
1367 if (CHIP_IS_E1(bp))
1368 bnx2x_set_eth_mac_addr_e1(bp, 1);
1369 else
1370 bnx2x_set_eth_mac_addr_e1h(bp, 1);
1371#ifdef BCM_CNIC
1372 /* Set iSCSI L2 MAC */
1373 mutex_lock(&bp->cnic_mutex);
1374 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1375 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1376 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1377 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
1378 CNIC_SB_ID(bp));
1379 }
1380 mutex_unlock(&bp->cnic_mutex);
1381#endif
1382 }
1383
1384 if (bp->port.pmf)
1385 bnx2x_initial_phy_init(bp, load_mode);
1386
1387 /* Start fast path */
1388 switch (load_mode) {
1389 case LOAD_NORMAL:
1390 if (bp->state == BNX2X_STATE_OPEN) {
1391 /* Tx queue should be only reenabled */
1392 netif_tx_wake_all_queues(bp->dev);
1393 }
1394 /* Initialize the receive filter. */
1395 bnx2x_set_rx_mode(bp->dev);
1396 break;
1397
1398 case LOAD_OPEN:
1399 netif_tx_start_all_queues(bp->dev);
1400 if (bp->state != BNX2X_STATE_OPEN)
1401 netif_tx_disable(bp->dev);
1402 /* Initialize the receive filter. */
1403 bnx2x_set_rx_mode(bp->dev);
1404 break;
1405
1406 case LOAD_DIAG:
1407 /* Initialize the receive filter. */
1408 bnx2x_set_rx_mode(bp->dev);
1409 bp->state = BNX2X_STATE_DIAG;
1410 break;
1411
1412 default:
1413 break;
1414 }
1415
1416 if (!bp->port.pmf)
1417 bnx2x__link_status_update(bp);
1418
1419 /* start the timer */
1420 mod_timer(&bp->timer, jiffies + bp->current_interval);
1421
1422#ifdef BCM_CNIC
1423 bnx2x_setup_cnic_irq_info(bp);
1424 if (bp->state == BNX2X_STATE_OPEN)
1425 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1426#endif
1427 bnx2x_inc_load_cnt(bp);
1428
1429 return 0;
1430
1431#ifdef BCM_CNIC
1432load_error4:
1433 /* Disable Timer scan */
1434 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1435#endif
1436load_error3:
1437 bnx2x_int_disable_sync(bp, 1);
1438 if (!BP_NOMCP(bp)) {
1439 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
1440 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
1441 }
1442 bp->port.pmf = 0;
1443 /* Free SKBs, SGEs, TPA pool and driver internals */
1444 bnx2x_free_skbs(bp);
1445 for_each_queue(bp, i)
1446 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1447load_error2:
1448 /* Release IRQs */
1449 bnx2x_free_irq(bp, false);
1450load_error1:
1451 bnx2x_napi_disable(bp);
1452 for_each_queue(bp, i)
1453 netif_napi_del(&bnx2x_fp(bp, i, napi));
1454 bnx2x_free_mem(bp);
1455
1456 return rc;
1457}
1458
1459/* must be called with rtnl_lock */
1460int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1461{
1462 int i;
1463
1464 if (bp->state == BNX2X_STATE_CLOSED) {
1465 /* Interface has been removed - nothing to recover */
1466 bp->recovery_state = BNX2X_RECOVERY_DONE;
1467 bp->is_leader = 0;
1468 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1469 smp_wmb();
1470
1471 return -EINVAL;
1472 }
1473
1474#ifdef BCM_CNIC
1475 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1476#endif
1477 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1478
1479 /* Set "drop all" */
1480 bp->rx_mode = BNX2X_RX_MODE_NONE;
1481 bnx2x_set_storm_rx_mode(bp);
1482
1483 /* Disable HW interrupts, NAPI and Tx */
1484 bnx2x_netif_stop(bp, 1);
1485 netif_carrier_off(bp->dev);
1486
1487 del_timer_sync(&bp->timer);
1488 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
1489 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1490 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1491
1492 /* Release IRQs */
1493 bnx2x_free_irq(bp, false);
1494
1495 /* Cleanup the chip if needed */
1496 if (unload_mode != UNLOAD_RECOVERY)
1497 bnx2x_chip_cleanup(bp, unload_mode);
1498
1499 bp->port.pmf = 0;
1500
1501 /* Free SKBs, SGEs, TPA pool and driver internals */
1502 bnx2x_free_skbs(bp);
1503 for_each_queue(bp, i)
1504 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1505 for_each_queue(bp, i)
1506 netif_napi_del(&bnx2x_fp(bp, i, napi));
1507 bnx2x_free_mem(bp);
1508
1509 bp->state = BNX2X_STATE_CLOSED;
1510
1511 /* The last driver must disable a "close the gate" if there is no
1512 * parity attention or "process kill" pending.
1513 */
1514 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1515 bnx2x_reset_is_done(bp))
1516 bnx2x_disable_close_the_gate(bp);
1517
1519 /* Reset MCP mailbox sequence if there is an ongoing recovery */
1519 if (unload_mode == UNLOAD_RECOVERY)
1520 bp->fw_seq = 0;
1521
1522 return 0;
1523}
1524int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1525{
1526 u16 pmcsr;
1527
1528 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1529
1530 switch (state) {
1531 case PCI_D0:
1532 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1533 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1534 PCI_PM_CTRL_PME_STATUS));
1535
1536 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1537 /* delay required during transition out of D3hot */
1538 msleep(20);
1539 break;
1540
1541 case PCI_D3hot:
1542 /* If there are other clients above don't
1543 shut down the power */
1544 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1545 return 0;
1546 /* Don't shut down the power for emulation and FPGA */
1547 if (CHIP_REV_IS_SLOW(bp))
1548 return 0;
1549
1550 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1551 pmcsr |= 3;
1552
1553 if (bp->wol)
1554 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1555
1556 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1557 pmcsr);
1558
1559 /* No more memory access after this point until
1560 * device is brought back to D0.
1561 */
1562 break;
1563
1564 default:
1565 return -EINVAL;
1566 }
1567 return 0;
1568}
1569
1570
1571
1572/*
1573 * net_device service functions
1574 */
1575
1576static int bnx2x_poll(struct napi_struct *napi, int budget)
1577{
1578 int work_done = 0;
1579 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1580 napi);
1581 struct bnx2x *bp = fp->bp;
1582
1583 while (1) {
1584#ifdef BNX2X_STOP_ON_ERROR
1585 if (unlikely(bp->panic)) {
1586 napi_complete(napi);
1587 return 0;
1588 }
1589#endif
1590
1591 if (bnx2x_has_tx_work(fp))
1592 bnx2x_tx_int(fp);
1593
1594 if (bnx2x_has_rx_work(fp)) {
1595 work_done += bnx2x_rx_int(fp, budget - work_done);
1596
1597 /* must not complete if we consumed full budget */
1598 if (work_done >= budget)
1599 break;
1600 }
1601
1602 /* Fall out from the NAPI loop if needed */
1603 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1604 bnx2x_update_fpsb_idx(fp);
1605 /* bnx2x_has_rx_work() reads the status block, thus we need
1606 * to ensure that status block indices have been actually read
1607 * (bnx2x_update_fpsb_idx) prior to this check
1608 * (bnx2x_has_rx_work) so that we won't write the "newer"
1609 * value of the status block to IGU (if there was a DMA right
1610 * after bnx2x_has_rx_work and if there is no rmb, the memory
1611 * reading (bnx2x_update_fpsb_idx) may be postponed to right
1612 * before bnx2x_ack_sb). In this case there will never be
1613 * another interrupt until there is another update of the
1614 * status block, while there is still unhandled work.
1615 */
1616 rmb();
1617
1618 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1619 napi_complete(napi);
1620 /* Re-enable interrupts */
1621 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1622 le16_to_cpu(fp->fp_c_idx),
1623 IGU_INT_NOP, 1);
1624 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1625 le16_to_cpu(fp->fp_u_idx),
1626 IGU_INT_ENABLE, 1);
1627 break;
1628 }
1629 }
1630 }
1631
1632 return work_done;
1633}
1634
1635
1636/* we split the first BD into headers and data BDs
1637 * to ease the pain of our fellow microcode engineers
1638 * we use one mapping for both BDs
1639 * So far this has only been observed to happen
1640 * in Other Operating Systems(TM)
1641 */
1642static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1643 struct bnx2x_fastpath *fp,
1644 struct sw_tx_bd *tx_buf,
1645 struct eth_tx_start_bd **tx_bd, u16 hlen,
1646 u16 bd_prod, int nbd)
1647{
1648 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1649 struct eth_tx_bd *d_tx_bd;
1650 dma_addr_t mapping;
1651 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1652
1653 /* first fix first BD */
1654 h_tx_bd->nbd = cpu_to_le16(nbd);
1655 h_tx_bd->nbytes = cpu_to_le16(hlen);
1656
1657 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1658 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1659 h_tx_bd->addr_lo, h_tx_bd->nbd);
1660
1661 /* now get a new data BD
1662 * (after the pbd) and fill it */
1663 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1664 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1665
1666 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1667 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1668
1669 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1670 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1671 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1672
1673 /* this marks the BD as one that has no individual mapping */
1674 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1675
1676 DP(NETIF_MSG_TX_QUEUED,
1677 "TSO split data size is %d (%x:%x)\n",
1678 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1679
1680 /* update tx_bd */
1681 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1682
1683 return bd_prod;
1684}
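/*
 * [Editor's illustrative sketch -- not part of the commit above.]
 * A standalone model of the header/data split done by bnx2x_tx_split():
 * one DMA mapping is described by two descriptors, the first covering the
 * hlen header bytes and the second the remaining payload at mapping + hlen,
 * so only a single dma_unmap is needed later. All names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct model_bd {
        uint64_t addr;
        uint16_t nbytes;
};

static void model_tx_split(uint64_t mapping, uint16_t len, uint16_t hlen,
                           struct model_bd *hdr, struct model_bd *data)
{
        hdr->addr    = mapping;         /* header BD keeps the original address */
        hdr->nbytes  = hlen;
        data->addr   = mapping + hlen;  /* data BD starts right after the headers */
        data->nbytes = len - hlen;      /* old_len - hlen, as in the code above */
}

int main(void)
{
        struct model_bd hdr, data;

        model_tx_split(0x10000, 1514, 54, &hdr, &data);
        printf("hdr  bd: addr=%#llx nbytes=%u\n",
               (unsigned long long)hdr.addr, hdr.nbytes);
        printf("data bd: addr=%#llx nbytes=%u\n",
               (unsigned long long)data.addr, data.nbytes);
        return 0;
}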
1685
1686static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1687{
1688 if (fix > 0)
1689 csum = (u16) ~csum_fold(csum_sub(csum,
1690 csum_partial(t_header - fix, fix, 0)));
1691
1692 else if (fix < 0)
1693 csum = (u16) ~csum_fold(csum_add(csum,
1694 csum_partial(t_header, -fix, 0)));
1695
1696 return swab16(csum);
1697}
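/*
 * [Editor's illustrative sketch -- not part of the commit above.]
 * A userspace model of the ones'-complement arithmetic behind
 * bnx2x_csum_fix(): when the HW started summing `fix` bytes before the
 * transport header, the contribution of those bytes can be removed by
 * end-around-carry adding their complemented partial sum (the csum_sub
 * idea). Helper names are hypothetical; `fix` is kept even so the byte
 * alignment of the two partial sums matches.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t oc_fold(uint32_t sum)
{
        while (sum >> 16)               /* end-around carry */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

static uint16_t oc_sum(const uint8_t *p, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)(p[i] << 8 | p[i + 1]);
        if (len & 1)
                sum += (uint32_t)(p[len - 1] << 8);
        return oc_fold(sum);
}

/* remove `part` from `whole`: the model of csum_sub() + csum_fold() */
static uint16_t oc_sub(uint16_t whole, uint16_t part)
{
        return oc_fold((uint32_t)whole + (uint16_t)~part);
}

int main(void)
{
        uint8_t buf[64];
        const size_t fix = 14;          /* even prefix, e.g. an Ethernet header */
        size_t i;

        for (i = 0; i < sizeof(buf); i++)
                buf[i] = (uint8_t)(i * 7 + 3);

        uint16_t whole  = oc_sum(buf, sizeof(buf));
        uint16_t prefix = oc_sum(buf, fix);
        uint16_t tail   = oc_sum(buf + fix, sizeof(buf) - fix);

        /* subtracting the prefix sum from the whole sum leaves the tail sum */
        assert(oc_sub(whole, prefix) == tail);
        printf("whole %#06x prefix %#06x -> tail %#06x\n", whole, prefix, tail);
        return 0;
}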
1698
1699static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1700{
1701 u32 rc;
1702
1703 if (skb->ip_summed != CHECKSUM_PARTIAL)
1704 rc = XMIT_PLAIN;
1705
1706 else {
1707 if (skb->protocol == htons(ETH_P_IPV6)) {
1708 rc = XMIT_CSUM_V6;
1709 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1710 rc |= XMIT_CSUM_TCP;
1711
1712 } else {
1713 rc = XMIT_CSUM_V4;
1714 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1715 rc |= XMIT_CSUM_TCP;
1716 }
1717 }
1718
1719 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1720 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1721
1722 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1723 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1724
1725 return rc;
1726}
1727
1728#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1729/* check if packet requires linearization (packet is too fragmented)
1730 no need to check fragmentation if page size > 8K (there will be no
1731 violation to FW restrictions) */
1732static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1733 u32 xmit_type)
1734{
1735 int to_copy = 0;
1736 int hlen = 0;
1737 int first_bd_sz = 0;
1738
1739 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1740 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1741
1742 if (xmit_type & XMIT_GSO) {
1743 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1744 /* Check if LSO packet needs to be copied:
1745 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1746 int wnd_size = MAX_FETCH_BD - 3;
1747 /* Number of windows to check */
1748 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1749 int wnd_idx = 0;
1750 int frag_idx = 0;
1751 u32 wnd_sum = 0;
1752
1753 /* Headers length */
1754 hlen = (int)(skb_transport_header(skb) - skb->data) +
1755 tcp_hdrlen(skb);
1756
1757 /* Amount of data (w/o headers) on linear part of SKB*/
1758 first_bd_sz = skb_headlen(skb) - hlen;
1759
1760 wnd_sum = first_bd_sz;
1761
1762 /* Calculate the first sum - it's special */
1763 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1764 wnd_sum +=
1765 skb_shinfo(skb)->frags[frag_idx].size;
1766
1767 /* If there was data on linear skb data - check it */
1768 if (first_bd_sz > 0) {
1769 if (unlikely(wnd_sum < lso_mss)) {
1770 to_copy = 1;
1771 goto exit_lbl;
1772 }
1773
1774 wnd_sum -= first_bd_sz;
1775 }
1776
1777 /* Others are easier: run through the frag list and
1778 check all windows */
1779 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1780 wnd_sum +=
1781 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1782
1783 if (unlikely(wnd_sum < lso_mss)) {
1784 to_copy = 1;
1785 break;
1786 }
1787 wnd_sum -=
1788 skb_shinfo(skb)->frags[wnd_idx].size;
1789 }
1790 } else {
1791 /* a non-LSO packet that is too fragmented should always
1792 be linearized */
1793 to_copy = 1;
1794 }
1795 }
1796
1797exit_lbl:
1798 if (unlikely(to_copy))
1799 DP(NETIF_MSG_TX_QUEUED,
1800 "Linearization IS REQUIRED for %s packet. "
1801 "num_frags %d hlen %d first_bd_sz %d\n",
1802 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1803 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1804
1805 return to_copy;
1806}
1807#endif
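/*
 * [Editor's illustrative sketch -- not part of the commit above.]
 * A standalone model of the sliding-window test in bnx2x_pkt_req_lin():
 * for an LSO packet, every window of (MAX_FETCH_BD - 3) consecutive data
 * BDs (the linear part counts as the first one) must carry at least one
 * MSS worth of payload, otherwise the skb has to be linearized. The
 * MODEL_MAX_FETCH_BD value, all names and the sample sizes below are
 * assumptions for illustration only.
 */
#include <stdio.h>

#define MODEL_MAX_FETCH_BD 13

static int model_needs_linearization(unsigned int first_bd_sz,
                                     const unsigned int *frag, int nfrags,
                                     unsigned int mss)
{
        const int wnd_size = MODEL_MAX_FETCH_BD - 3;
        unsigned int wnd_sum = first_bd_sz;
        int i;

        if (nfrags < wnd_size)          /* few frags: the FW limit cannot be hit */
                return 0;

        /* first window: linear data plus the first (wnd_size - 1) frags */
        for (i = 0; i < wnd_size - 1; i++)
                wnd_sum += frag[i];
        if (first_bd_sz > 0) {
                if (wnd_sum < mss)
                        return 1;
                wnd_sum -= first_bd_sz;
        }

        /* slide the window across the rest of the frag list */
        for (i = 0; i + wnd_size - 1 < nfrags; i++) {
                wnd_sum += frag[i + wnd_size - 1];
                if (wnd_sum < mss)
                        return 1;
                wnd_sum -= frag[i];
        }
        return 0;
}

int main(void)
{
        /* many small frags: some window sums fall below a 1460-byte MSS */
        unsigned int frags[16] = { 256, 128, 64, 64, 64, 64, 64, 64,
                                   64, 64, 64, 64, 64, 64, 4096, 4096 };

        printf("needs linearization: %d\n",
               model_needs_linearization(512, frags, 16, 1460));
        return 0;
}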
1808
1809/* called with netif_tx_lock
1810 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1811 * netif_wake_queue()
1812 */
1813netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1814{
1815 struct bnx2x *bp = netdev_priv(dev);
1816 struct bnx2x_fastpath *fp;
1817 struct netdev_queue *txq;
1818 struct sw_tx_bd *tx_buf;
1819 struct eth_tx_start_bd *tx_start_bd;
1820 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1821 struct eth_tx_parse_bd *pbd = NULL;
1822 u16 pkt_prod, bd_prod;
1823 int nbd, fp_index;
1824 dma_addr_t mapping;
1825 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1826 int i;
1827 u8 hlen = 0;
1828 __le16 pkt_size = 0;
1829 struct ethhdr *eth;
1830 u8 mac_type = UNICAST_ADDRESS;
1831
1832#ifdef BNX2X_STOP_ON_ERROR
1833 if (unlikely(bp->panic))
1834 return NETDEV_TX_BUSY;
1835#endif
1836
1837 fp_index = skb_get_queue_mapping(skb);
1838 txq = netdev_get_tx_queue(dev, fp_index);
1839
1840 fp = &bp->fp[fp_index];
1841
1842 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1843 fp->eth_q_stats.driver_xoff++;
1844 netif_tx_stop_queue(txq);
1845 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1846 return NETDEV_TX_BUSY;
1847 }
1848
1849 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
1850 " gso type %x xmit_type %x\n",
1851 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1852 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1853
1854 eth = (struct ethhdr *)skb->data;
1855
1856 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1857 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1858 if (is_broadcast_ether_addr(eth->h_dest))
1859 mac_type = BROADCAST_ADDRESS;
1860 else
1861 mac_type = MULTICAST_ADDRESS;
1862 }
1863
1864#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1865 /* First, check if we need to linearize the skb (due to FW
1866 restrictions). No need to check fragmentation if page size > 8K
1867 (there will be no violation to FW restrictions) */
1868 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1869 /* Statistics of linearization */
1870 bp->lin_cnt++;
1871 if (skb_linearize(skb) != 0) {
1872 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1873 "silently dropping this SKB\n");
1874 dev_kfree_skb_any(skb);
1875 return NETDEV_TX_OK;
1876 }
1877 }
1878#endif
1879
1880 /*
1881 Please read carefully. First we use one BD which we mark as start,
1882 then we have a parsing info BD (used for TSO or xsum),
1883 and only then we have the rest of the TSO BDs.
1884 (don't forget to mark the last one as last,
1885 and to unmap only AFTER you write to the BD ...)
1886 And above all, all pdb sizes are in words - NOT DWORDS!
1887 */
1888
1889 pkt_prod = fp->tx_pkt_prod++;
1890 bd_prod = TX_BD(fp->tx_bd_prod);
1891
1892 /* get a tx_buf and first BD */
1893 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
1894 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1895
1896 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1897 tx_start_bd->general_data = (mac_type <<
1898 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
1899 /* header nbd */
1900 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
1901
1902 /* remember the first BD of the packet */
1903 tx_buf->first_bd = fp->tx_bd_prod;
1904 tx_buf->skb = skb;
1905 tx_buf->flags = 0;
1906
1907 DP(NETIF_MSG_TX_QUEUED,
1908 "sending pkt %u @%p next_idx %u bd %u @%p\n",
1909 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
1910
1911#ifdef BCM_VLAN
1912 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
1913 (bp->flags & HW_VLAN_TX_FLAG)) {
1914 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
1915 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
1916 } else
1917#endif
1918 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
1919
1920 /* turn on parsing and get a BD */
1921 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1922 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
1923
1924 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
1925
1926 if (xmit_type & XMIT_CSUM) {
1927 hlen = (skb_network_header(skb) - skb->data) / 2;
1928
1929 /* for now NS flag is not used in Linux */
1930 pbd->global_data =
1931 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1932 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
1933
1934 pbd->ip_hlen = (skb_transport_header(skb) -
1935 skb_network_header(skb)) / 2;
1936
1937 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
1938
1939 pbd->total_hlen = cpu_to_le16(hlen);
1940 hlen = hlen*2;
1941
1942 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1943
1944 if (xmit_type & XMIT_CSUM_V4)
1945 tx_start_bd->bd_flags.as_bitfield |=
1946 ETH_TX_BD_FLAGS_IP_CSUM;
1947 else
1948 tx_start_bd->bd_flags.as_bitfield |=
1949 ETH_TX_BD_FLAGS_IPV6;
1950
1951 if (xmit_type & XMIT_CSUM_TCP) {
1952 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1953
1954 } else {
1955 s8 fix = SKB_CS_OFF(skb); /* signed! */
1956
1957 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
1958
1959 DP(NETIF_MSG_TX_QUEUED,
1960 "hlen %d fix %d csum before fix %x\n",
1961 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
1962
1963 /* HW bug: fixup the CSUM */
1964 pbd->tcp_pseudo_csum =
1965 bnx2x_csum_fix(skb_transport_header(skb),
1966 SKB_CS(skb), fix);
1967
1968 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1969 pbd->tcp_pseudo_csum);
1970 }
1971 }
1972
1973 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1974 skb_headlen(skb), DMA_TO_DEVICE);
1975
1976 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1977 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1978 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
1979 tx_start_bd->nbd = cpu_to_le16(nbd);
1980 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
1981 pkt_size = tx_start_bd->nbytes;
1982
1983 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
1984 " nbytes %d flags %x vlan %x\n",
1985 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
1986 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
1987 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
1988
1989 if (xmit_type & XMIT_GSO) {
1990
1991 DP(NETIF_MSG_TX_QUEUED,
1992 "TSO packet len %d hlen %d total len %d tso size %d\n",
1993 skb->len, hlen, skb_headlen(skb),
1994 skb_shinfo(skb)->gso_size);
1995
1996 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
1997
1998 if (unlikely(skb_headlen(skb) > hlen))
1999 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2000 hlen, bd_prod, ++nbd);
2001
2002 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2003 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2004 pbd->tcp_flags = pbd_tcp_flags(skb);
2005
2006 if (xmit_type & XMIT_GSO_V4) {
2007 pbd->ip_id = swab16(ip_hdr(skb)->id);
2008 pbd->tcp_pseudo_csum =
2009 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2010 ip_hdr(skb)->daddr,
2011 0, IPPROTO_TCP, 0));
2012
2013 } else
2014 pbd->tcp_pseudo_csum =
2015 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2016 &ipv6_hdr(skb)->daddr,
2017 0, IPPROTO_TCP, 0));
2018
2019 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
2020 }
2021 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2022
2023 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2024 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2025
2026 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2027 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2028 if (total_pkt_bd == NULL)
2029 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2030
2031 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2032 frag->page_offset,
2033 frag->size, DMA_TO_DEVICE);
2034
2035 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2036 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2037 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2038 le16_add_cpu(&pkt_size, frag->size);
2039
2040 DP(NETIF_MSG_TX_QUEUED,
2041 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2042 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2043 le16_to_cpu(tx_data_bd->nbytes));
2044 }
2045
2046 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2047
2048 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2049
2050 /* now send a tx doorbell, counting the next BD
2051 * if the packet contains or ends with it
2052 */
2053 if (TX_BD_POFF(bd_prod) < nbd)
2054 nbd++;
2055
2056 if (total_pkt_bd != NULL)
2057 total_pkt_bd->total_pkt_bytes = pkt_size;
2058
2059 if (pbd)
2060 DP(NETIF_MSG_TX_QUEUED,
2061 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2062 " tcp_flags %x xsum %x seq %u hlen %u\n",
2063 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
2064 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
2065 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
2066
2067 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2068
2069 /*
2070 * Make sure that the BD data is updated before updating the producer
2071 * since FW might read the BD right after the producer is updated.
2072 * This is only applicable for weak-ordered memory model archs such
2073 * as IA-64. The following barrier is also mandatory since the FW
2074 * assumes packets must have BDs.
2075 */
2076 wmb();
2077
2078 fp->tx_db.data.prod += nbd;
2079 barrier();
2080 DOORBELL(bp, fp->index, fp->tx_db.raw);
2081
2082 mmiowb();
2083
2084 fp->tx_bd_prod += nbd;
2085
2086 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2087 netif_tx_stop_queue(txq);
2088
2089 /* The paired memory barrier is in bnx2x_tx_int(); we have to keep
2090 * the ordering of set_bit() in netif_tx_stop_queue() and the read of
2091 * fp->tx_bd_cons */
2092 smp_mb();
2093
2094 fp->eth_q_stats.driver_xoff++;
2095 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2096 netif_tx_wake_queue(txq);
2097 }
2098 fp->tx_pkt++;
2099
2100 return NETDEV_TX_OK;
2101}
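
A quick note on the BD bookkeeping in bnx2x_start_xmit() above: the parse BD keeps header lengths in 16-bit words (hence the /2 and *2 around hlen, and the "words - NOT DWORDS" warning), and nbd starts out as start BD + parse BD + one data BD per fragment. A minimal user-space sketch of that arithmetic, assuming a plain Ethernet/IPv4/TCP frame without options and a hypothetical two-fragment skb (the sizes below are illustrative, not taken from the driver headers):

#include <stdio.h>

int main(void)
{
	/* assumed header sizes: Ethernet 14, IPv4 20, TCP 20 (no options) */
	unsigned int eth_hlen = 14, ip_hlen = 20, tcp_hlen = 20;
	unsigned int nr_frags = 2;	/* hypothetical number of skb fragments */

	/* parse BD lengths are carried in 16-bit words, not bytes or dwords */
	unsigned int total_hlen_words = (eth_hlen + ip_hlen + tcp_hlen) / 2;

	/* start BD + parse BD + one data BD per fragment */
	unsigned int nbd = nr_frags + 2;

	printf("total_hlen = %u words (%u bytes), nbd = %u\n",
	       total_hlen_words, total_hlen_words * 2, nbd);
	return 0;
}

For such a frame this prints total_hlen = 27 words (54 bytes) and nbd = 4, before any TSO split BD or doorbell next-page adjustment is added.
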
2102/* called with rtnl_lock */
2103int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2104{
2105 struct sockaddr *addr = p;
2106 struct bnx2x *bp = netdev_priv(dev);
2107
2108 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2109 return -EINVAL;
2110
2111 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2112 if (netif_running(dev)) {
2113 if (CHIP_IS_E1(bp))
2114 bnx2x_set_eth_mac_addr_e1(bp, 1);
2115 else
2116 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2117 }
2118
2119 return 0;
2120}
2121
2122/* called with rtnl_lock */
2123int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2124{
2125 struct bnx2x *bp = netdev_priv(dev);
2126 int rc = 0;
2127
2128 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2129 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2130 return -EAGAIN;
2131 }
2132
2133 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2134 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2135 return -EINVAL;
2136
2137 /* This does not race with packet allocation
2138 * because the actual alloc size is
2139 * only updated as part of load
2140 */
2141 dev->mtu = new_mtu;
2142
2143 if (netif_running(dev)) {
2144 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2145 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2146 }
2147
2148 return rc;
2149}
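
The range check in bnx2x_change_mtu() can be exercised on its own; here is a hedged sketch of the same bounds test, with ETH_MAX_JUMBO_PACKET_SIZE and ETH_MIN_PACKET_SIZE taken as 9600 and 60 purely for illustration (the real values come from the driver headers):

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN			14
#define ETH_MIN_PACKET_SIZE		60	/* assumed */
#define ETH_MAX_JUMBO_PACKET_SIZE	9600	/* assumed */

/* mirrors the range test above: reject MTUs outside the supported window */
static bool mtu_in_range(int new_mtu)
{
	if (new_mtu > ETH_MAX_JUMBO_PACKET_SIZE)
		return false;
	if (new_mtu + ETH_HLEN < ETH_MIN_PACKET_SIZE)
		return false;
	return true;
}

int main(void)
{
	/* 45 + 14 < 60 -> rejected, 1500 -> accepted, 10000 > 9600 -> rejected */
	printf("%d %d %d\n", mtu_in_range(45), mtu_in_range(1500),
	       mtu_in_range(10000));
	return 0;
}

The check itself never touches hardware; as the comment above notes, the actual buffer sizes are only recomputed on the unload/load cycle that follows.
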
2150
2151void bnx2x_tx_timeout(struct net_device *dev)
2152{
2153 struct bnx2x *bp = netdev_priv(dev);
2154
2155#ifdef BNX2X_STOP_ON_ERROR
2156 if (!bp->panic)
2157 bnx2x_panic();
2158#endif
2159 /* This allows the netif to be shut down gracefully before resetting */
2160 schedule_delayed_work(&bp->reset_task, 0);
2161}
2162
2163#ifdef BCM_VLAN
2164/* called with rtnl_lock */
2165void bnx2x_vlan_rx_register(struct net_device *dev,
2166 struct vlan_group *vlgrp)
2167{
2168 struct bnx2x *bp = netdev_priv(dev);
2169
2170 bp->vlgrp = vlgrp;
2171
2172 /* Set flags according to the required capabilities */
2173 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
2174
2175 if (dev->features & NETIF_F_HW_VLAN_TX)
2176 bp->flags |= HW_VLAN_TX_FLAG;
2177
2178 if (dev->features & NETIF_F_HW_VLAN_RX)
2179 bp->flags |= HW_VLAN_RX_FLAG;
2180
2181 if (netif_running(dev))
2182 bnx2x_set_client_config(bp);
2183}
2184
2185#endif
2186int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2187{
2188 struct net_device *dev = pci_get_drvdata(pdev);
2189 struct bnx2x *bp;
2190
2191 if (!dev) {
2192 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2193 return -ENODEV;
2194 }
2195 bp = netdev_priv(dev);
2196
2197 rtnl_lock();
2198
2199 pci_save_state(pdev);
2200
2201 if (!netif_running(dev)) {
2202 rtnl_unlock();
2203 return 0;
2204 }
2205
2206 netif_device_detach(dev);
2207
2208 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2209
2210 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2211
2212 rtnl_unlock();
2213
2214 return 0;
2215}
2216
2217int bnx2x_resume(struct pci_dev *pdev)
2218{
2219 struct net_device *dev = pci_get_drvdata(pdev);
2220 struct bnx2x *bp;
2221 int rc;
2222
2223 if (!dev) {
2224 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2225 return -ENODEV;
2226 }
2227 bp = netdev_priv(dev);
2228
2229 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2230 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2231 return -EAGAIN;
2232 }
2233
2234 rtnl_lock();
2235
2236 pci_restore_state(pdev);
2237
2238 if (!netif_running(dev)) {
2239 rtnl_unlock();
2240 return 0;
2241 }
2242
2243 bnx2x_set_power_state(bp, PCI_D0);
2244 netif_device_attach(dev);
2245
2246 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2247
2248 rtnl_unlock();
2249
2250 return rc;
2251}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
new file mode 100644
index 000000000000..d1979b1a7ed2
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,652 @@
1/* bnx2x_cmn.h: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17#ifndef BNX2X_CMN_H
18#define BNX2X_CMN_H
19
20#include <linux/types.h>
21#include <linux/netdevice.h>
22
23
24#include "bnx2x.h"
25
26
27/*********************** Interfaces ****************************
28 * Functions that need to be implemented by each driver version
29 */
30
31/**
32 * Initialize link parameters structure variables.
33 *
34 * @param bp
35 * @param load_mode
36 *
37 * @return u8
38 */
39u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
40
41/**
42 * Configure hw according to link parameters structure.
43 *
44 * @param bp
45 */
46void bnx2x_link_set(struct bnx2x *bp);
47
48/**
49 * Query link status
50 *
51 * @param bp
52 *
53 * @return 0 - link is UP
54 */
55u8 bnx2x_link_test(struct bnx2x *bp);
56
57/**
58 * Handles link status change
59 *
60 * @param bp
61 */
62void bnx2x__link_status_update(struct bnx2x *bp);
63
64/**
65 * MSI-X slowpath interrupt handler
66 *
67 * @param irq
68 * @param dev_instance
69 *
70 * @return irqreturn_t
71 */
72irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
73
74/**
75 * non MSI-X interrupt handler
76 *
77 * @param irq
78 * @param dev_instance
79 *
80 * @return irqreturn_t
81 */
82irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
83#ifdef BCM_CNIC
84
85/**
86 * Send command to cnic driver
87 *
88 * @param bp
89 * @param cmd
90 */
91int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
92
93/**
94 * Provides cnic information for proper interrupt handling
95 *
96 * @param bp
97 */
98void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
99#endif
100
101/**
102 * Enable HW interrupts.
103 *
104 * @param bp
105 */
106void bnx2x_int_enable(struct bnx2x *bp);
107
108/**
109 * Disable interrupts. This function ensures that no
110 * ISRs or SP DPCs (sp_task) are running after it returns.
111 *
112 * @param bp
113 * @param disable_hw if true, disable HW interrupts.
114 */
115void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
116
117/**
118 * Init HW blocks according to current initialization stage:
119 * COMMON, PORT or FUNCTION.
120 *
121 * @param bp
122 * @param load_code: COMMON, PORT or FUNCTION
123 *
124 * @return int
125 */
126int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
127
128/**
129 * Init driver internals:
130 * - rings
131 * - status blocks
132 * - etc.
133 *
134 * @param bp
135 * @param load_code COMMON, PORT or FUNCTION
136 */
137void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
138
139/**
140 * Allocate driver's memory.
141 *
142 * @param bp
143 *
144 * @return int
145 */
146int bnx2x_alloc_mem(struct bnx2x *bp);
147
148/**
149 * Release driver's memory.
150 *
151 * @param bp
152 */
153void bnx2x_free_mem(struct bnx2x *bp);
154
155/**
156 * Bring up a leading (the first) eth Client.
157 *
158 * @param bp
159 *
160 * @return int
161 */
162int bnx2x_setup_leading(struct bnx2x *bp);
163
164/**
165 * Setup non-leading eth Client.
166 *
167 * @param bp
168 * @param fp
169 *
170 * @return int
171 */
172int bnx2x_setup_multi(struct bnx2x *bp, int index);
173
174/**
175 * Set the number of queues according to mode and the number of
176 * available MSI-X vectors
177 *
178 * @param bp
179 *
180 */
181void bnx2x_set_num_queues_msix(struct bnx2x *bp);
182
183/**
184 * Cleanup chip internals:
185 * - Cleanup MAC configuration.
186 * - Close clients.
187 * - etc.
188 *
189 * @param bp
190 * @param unload_mode
191 */
192void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
193
194/**
195 * Acquire HW lock.
196 *
197 * @param bp
198 * @param resource Resource bit which was locked
199 *
200 * @return int
201 */
202int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
203
204/**
205 * Release HW lock.
206 *
207 * @param bp driver handle
208 * @param resource Resource bit which was locked
209 *
210 * @return int
211 */
212int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
213
214/**
215 * Configure eth MAC address in the HW according to the value in
216 * netdev->dev_addr for 57711
217 *
218 * @param bp driver handle
219 * @param set
220 */
221void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
222
223/**
224 * Configure eth MAC address in the HW according to the value in
225 * netdev->dev_addr for 57710
226 *
227 * @param bp driver handle
228 * @param set
229 */
230void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
231
232#ifdef BCM_CNIC
233/**
234 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
235 * MAC(s). The function will wait until the ramrod completion
236 * returns.
237 *
238 * @param bp driver handle
239 * @param set set or clear the CAM entry
240 *
241 * @return 0 on success, -ENODEV if the ramrod doesn't return.
242 */
243int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
244#endif
245
246/**
247 * Initialize status block in FW and HW
248 *
249 * @param bp driver handle
250 * @param sb host_status_block
251 * @param dma_addr_t mapping
252 * @param int sb_id
253 */
254void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
255 dma_addr_t mapping, int sb_id);
256
257/**
258 * Reconfigure FW/HW according to dev->flags rx mode
259 *
260 * @param dev net_device
261 *
262 */
263void bnx2x_set_rx_mode(struct net_device *dev);
264
265/**
266 * Configure MAC filtering rules in a FW.
267 *
268 * @param bp driver handle
269 */
270void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
271
272/* Parity errors related */
273void bnx2x_inc_load_cnt(struct bnx2x *bp);
274u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
275bool bnx2x_chk_parity_attn(struct bnx2x *bp);
276bool bnx2x_reset_is_done(struct bnx2x *bp);
277void bnx2x_disable_close_the_gate(struct bnx2x *bp);
278
279/**
280 * Perform statistics handling according to event
281 *
282 * @param bp driver handle
283 * @param event bnx2x_stats_event
284 */
285void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
286
287/**
288 * Configures FW with client parameters (like HW VLAN removal)
289 * for each active client.
290 *
291 * @param bp
292 */
293void bnx2x_set_client_config(struct bnx2x *bp);
294
295/**
296 * Handle sp events
297 *
298 * @param fp fastpath handle for the event
299 * @param rr_cqe eth_rx_cqe
300 */
301void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
302
303
304static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
305{
306 struct host_status_block *fpsb = fp->status_blk;
307
308 barrier(); /* status block is written to by the chip */
309 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
310 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
311}
312
313static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
314 struct bnx2x_fastpath *fp,
315 u16 bd_prod, u16 rx_comp_prod,
316 u16 rx_sge_prod)
317{
318 struct ustorm_eth_rx_producers rx_prods = {0};
319 int i;
320
321 /* Update producers */
322 rx_prods.bd_prod = bd_prod;
323 rx_prods.cqe_prod = rx_comp_prod;
324 rx_prods.sge_prod = rx_sge_prod;
325
326 /*
327 * Make sure that the BD and SGE data is updated before updating the
328 * producers since FW might read the BD/SGE right after the producer
329 * is updated.
330 * This is only applicable for weak-ordered memory model archs such
331 * as IA-64. The following barrier is also mandatory since the FW
332 * assumes BDs must have buffers.
333 */
334 wmb();
335
336 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
337 REG_WR(bp, BAR_USTRORM_INTMEM +
338 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
339 ((u32 *)&rx_prods)[i]);
340
341 mmiowb(); /* keep prod updates ordered */
342
343 DP(NETIF_MSG_RX_STATUS,
344 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
345 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
346}
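
The wmb()-before-producer rule documented above is the usual single-producer descriptor-ring handoff: the BD/SGE contents have to be globally visible before the producer index that advertises them. A rough user-space analogue, sketched with a C11 release store standing in for the wmb() plus register write (the REG_WR()/mmiowb() half of the story has no direct equivalent here):

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 256

struct desc {
	uint64_t addr;
	uint32_t len;
};

static struct desc ring[RING_SIZE];
static _Atomic uint16_t prod;

/* publish one descriptor: fill its fields first, then advertise it via
 * the producer index with release semantics (the wmb() analogue) */
static void publish(uint64_t addr, uint32_t len)
{
	uint16_t p = atomic_load_explicit(&prod, memory_order_relaxed);

	ring[p % RING_SIZE].addr = addr;
	ring[p % RING_SIZE].len = len;

	atomic_store_explicit(&prod, (uint16_t)(p + 1), memory_order_release);
}

int main(void)
{
	publish(0x1000, 64);
	return 0;
}

A consumer that loads prod with acquire semantics is then guaranteed to observe the descriptor fields written before it, which is the property the comment says the FW relies on when it reads BDs right after the producer update.
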
347
348
349
350static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
351 u8 storm, u16 index, u8 op, u8 update)
352{
353 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
354 COMMAND_REG_INT_ACK);
355 struct igu_ack_register igu_ack;
356
357 igu_ack.status_block_index = index;
358 igu_ack.sb_id_and_flags =
359 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
360 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
361 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
362 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
363
364 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
365 (*(u32 *)&igu_ack), hc_addr);
366 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
367
368 /* Make sure that ACK is written */
369 mmiowb();
370 barrier();
371}
372static inline u16 bnx2x_ack_int(struct bnx2x *bp)
373{
374 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
375 COMMAND_REG_SIMD_MASK);
376 u32 result = REG_RD(bp, hc_addr);
377
378 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
379 result, hc_addr);
380
381 return result;
382}
383
384/*
385 * fast path service functions
386 */
387
388static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
389{
390 /* Tell compiler that consumer and producer can change */
391 barrier();
392 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
393}
394
395static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
396{
397 s16 used;
398 u16 prod;
399 u16 cons;
400
401 prod = fp->tx_bd_prod;
402 cons = fp->tx_bd_cons;
403
404 /* NUM_TX_RINGS = number of "next-page" entries
405 It will be used as a threshold */
406 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
407
408#ifdef BNX2X_STOP_ON_ERROR
409 WARN_ON(used < 0);
410 WARN_ON(used > fp->bp->tx_ring_size);
411 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
412#endif
413
414 return (s16)(fp->bp->tx_ring_size) - used;
415}
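
The arithmetic in bnx2x_tx_avail() leans on 16-bit wraparound: SUB_S16() amounts to a signed 16-bit difference, and the NUM_TX_RINGS term reserves the per-page "next-page" BDs that never carry data (the threshold the comment mentions). A worked standalone version, with NUM_TX_RINGS and the ring size chosen as example values only:

#include <stdint.h>
#include <stdio.h>

#define NUM_TX_RINGS	16	/* assumed: one "next-page" BD per ring page */

/* signed 16-bit difference, the SUB_S16() idea: stays correct across wrap */
static int16_t sub_s16(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b);
}

static int16_t tx_avail(uint16_t prod, uint16_t cons, uint16_t ring_size)
{
	int16_t used = sub_s16(prod, cons) + NUM_TX_RINGS;

	return (int16_t)ring_size - used;
}

int main(void)
{
	/* prod has wrapped past 0 while cons has not: 10 - 65530 == 16 mod 2^16,
	 * so avail = 4000 - (16 + 16) = 3968 */
	printf("%d\n", tx_avail(10, 65530, 4000));
	return 0;
}
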
416
417static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
418{
419 u16 hw_cons;
420
421 /* Tell compiler that status block fields can change */
422 barrier();
423 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
424 return hw_cons != fp->tx_pkt_cons;
425}
426
427static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
429{
430 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
431 struct page *page = sw_buf->page;
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
433
434 /* Skip "next page" elements */
435 if (!page)
436 return;
437
438 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
439 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
440 __free_pages(page, PAGES_PER_SGE_SHIFT);
441
442 sw_buf->page = NULL;
443 sge->addr_hi = 0;
444 sge->addr_lo = 0;
445}
446
447static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
448 struct bnx2x_fastpath *fp, int last)
449{
450 int i;
451
452 for (i = 0; i < last; i++)
453 bnx2x_free_rx_sge(bp, fp, i);
454}
455
456static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
457 struct bnx2x_fastpath *fp, u16 index)
458{
459 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
460 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
461 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
462 dma_addr_t mapping;
463
464 if (unlikely(page == NULL))
465 return -ENOMEM;
466
467 mapping = dma_map_page(&bp->pdev->dev, page, 0,
468 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
469 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
470 __free_pages(page, PAGES_PER_SGE_SHIFT);
471 return -ENOMEM;
472 }
473
474 sw_buf->page = page;
475 dma_unmap_addr_set(sw_buf, mapping, mapping);
476
477 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
478 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
479
480 return 0;
481}
482static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
483 struct bnx2x_fastpath *fp, u16 index)
484{
485 struct sk_buff *skb;
486 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
487 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
488 dma_addr_t mapping;
489
490 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
491 if (unlikely(skb == NULL))
492 return -ENOMEM;
493
494 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
495 DMA_FROM_DEVICE);
496 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
497 dev_kfree_skb(skb);
498 return -ENOMEM;
499 }
500
501 rx_buf->skb = skb;
502 dma_unmap_addr_set(rx_buf, mapping, mapping);
503
504 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
505 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
506
507 return 0;
508}
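
The addr_hi/addr_lo pairs filled in by these helpers are simply the upper and lower 32 bits of the 64-bit DMA address, stored as little-endian halves in the descriptor. A minimal sketch of that split (U64_HI/U64_LO are re-derived here from their apparent meaning rather than copied from the driver headers):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* upper and lower 32 bits of a 64-bit bus address */
#define U64_HI(x)	((uint32_t)((uint64_t)(x) >> 32))
#define U64_LO(x)	((uint32_t)((uint64_t)(x) & 0xffffffffULL))

int main(void)
{
	uint64_t mapping = 0x0000000123456000ULL;	/* example DMA address */

	/* prints addr_hi=0x00000001 addr_lo=0x23456000 */
	printf("addr_hi=0x%08" PRIx32 " addr_lo=0x%08" PRIx32 "\n",
	       U64_HI(mapping), U64_LO(mapping));
	return 0;
}
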
509
510/* note that we are not allocating a new skb,
511 * we are just moving one from cons to prod
512 * we are not creating a new mapping,
513 * so there is no need to check for dma_mapping_error().
514 */
515static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
516 struct sk_buff *skb, u16 cons, u16 prod)
517{
518 struct bnx2x *bp = fp->bp;
519 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
520 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
521 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
522 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
523
524 dma_sync_single_for_device(&bp->pdev->dev,
525 dma_unmap_addr(cons_rx_buf, mapping),
526 RX_COPY_THRESH, DMA_FROM_DEVICE);
527
528 prod_rx_buf->skb = cons_rx_buf->skb;
529 dma_unmap_addr_set(prod_rx_buf, mapping,
530 dma_unmap_addr(cons_rx_buf, mapping));
531 *prod_bd = *cons_bd;
532}
533
534static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
535{
536 int i, j;
537
538 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
539 int idx = RX_SGE_CNT * i - 1;
540
541 for (j = 0; j < 2; j++) {
542 SGE_MASK_CLEAR_BIT(fp, idx);
543 idx--;
544 }
545 }
546}
547
548static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
549{
550 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
551 memset(fp->sge_mask, 0xff,
552 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
553
554 /* Clear the two last indices in the page to 1:
555 these are the indices that correspond to the "next" element,
556 hence will never be indicated and should be removed from
557 the calculations. */
558 bnx2x_clear_sge_mask_next_elems(fp);
559}
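
To see what bnx2x_init_sge_ring_bit_mask() produces, here is a standalone sketch of the same two steps with RX_SGE_CNT and NUM_RX_SGE_PAGES set to small assumed values: every SGE starts out marked as outstanding, then the two trailing "next-page" slots of each page are cleared so they never look like pending buffers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RX_SGE_CNT		64	/* assumed SGE descriptors per page */
#define NUM_RX_SGE_PAGES	2	/* assumed number of pages */
#define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)

static uint64_t sge_mask[NUM_RX_SGE / 64];

static void clear_bit64(int idx)
{
	sge_mask[idx / 64] &= ~(1ULL << (idx % 64));
}

/* all-ones mask, then drop the two "next-page" entries at each page end */
static void init_sge_mask(void)
{
	int i, j;

	memset(sge_mask, 0xff, sizeof(sge_mask));
	for (i = 1; i <= NUM_RX_SGE_PAGES; i++)
		for (j = 0; j < 2; j++)
			clear_bit64(RX_SGE_CNT * i - 1 - j);
}

int main(void)
{
	init_sge_mask();
	/* with 64 entries per page: 0x3fffffffffffffff (top two bits clear) */
	printf("page 0 mask: 0x%016llx\n", (unsigned long long)sge_mask[0]);
	return 0;
}
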
560static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
561 struct bnx2x_fastpath *fp, int last)
562{
563 int i;
564
565 for (i = 0; i < last; i++) {
566 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
567 struct sk_buff *skb = rx_buf->skb;
568
569 if (skb == NULL) {
570 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
571 continue;
572 }
573
574 if (fp->tpa_state[i] == BNX2X_TPA_START)
575 dma_unmap_single(&bp->pdev->dev,
576 dma_unmap_addr(rx_buf, mapping),
577 bp->rx_buf_size, DMA_FROM_DEVICE);
578
579 dev_kfree_skb(skb);
580 rx_buf->skb = NULL;
581 }
582}
583
584
585static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
586{
587 int i, j;
588
589 for_each_queue(bp, j) {
590 struct bnx2x_fastpath *fp = &bp->fp[j];
591
592 for (i = 1; i <= NUM_TX_RINGS; i++) {
593 struct eth_tx_next_bd *tx_next_bd =
594 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
595
596 tx_next_bd->addr_hi =
597 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
598 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
599 tx_next_bd->addr_lo =
600 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
601 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
602 }
603
604 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
605 fp->tx_db.data.zero_fill1 = 0;
606 fp->tx_db.data.prod = 0;
607
608 fp->tx_pkt_prod = 0;
609 fp->tx_pkt_cons = 0;
610 fp->tx_bd_prod = 0;
611 fp->tx_bd_cons = 0;
612 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
613 fp->tx_pkt = 0;
614 }
615}
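
bnx2x_init_tx_ring() links the BD pages into a ring: the last descriptor of each page is a next-BD whose address points at the following page, and the (i % NUM_TX_RINGS) term makes the final page wrap back to the first. A small sketch of that address computation, with BCM_PAGE_SIZE and NUM_TX_RINGS as assumed example values and a made-up DMA base:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BCM_PAGE_SIZE	4096	/* assumed */
#define NUM_TX_RINGS	4	/* assumed number of BD pages */

int main(void)
{
	uint64_t tx_desc_mapping = 0x10000000ULL;	/* example DMA base */
	int i;

	/* the last BD of page i-1 points at page (i % NUM_TX_RINGS) */
	for (i = 1; i <= NUM_TX_RINGS; i++)
		printf("page %d next-bd -> 0x%" PRIx64 "\n", i - 1,
		       tx_desc_mapping +
		       BCM_PAGE_SIZE * (uint64_t)(i % NUM_TX_RINGS));
	return 0;
}

The last line of output points back at the base mapping, which is what closes the ring.
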
616static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
617{
618 u16 rx_cons_sb;
619
620 /* Tell compiler that status block fields can change */
621 barrier();
622 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
623 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
624 rx_cons_sb++;
625 return (fp->rx_comp_cons != rx_cons_sb);
626}
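
The consumer adjustment in bnx2x_has_rx_work() exists because the last slot of every RCQ page is a "next page" element rather than a real completion, so a status-block consumer that lands exactly on it is bumped one index further. A worked sketch, assuming 128 CQEs per page (MAX_RCQ_DESC_CNT == 127) purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define RCQ_DESC_CNT		128	/* assumed CQEs per page */
#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1)

/* skip the per-page "next page" element, as the inline above does */
static uint16_t adjust_rx_cons(uint16_t rx_cons_sb)
{
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return rx_cons_sb;
}

int main(void)
{
	/* 100 stays 100; 127 sits on the page link and becomes 128 */
	printf("%u %u\n", adjust_rx_cons(100), adjust_rx_cons(127));
	return 0;
}
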
627
628/* HW Lock for shared dual port PHYs */
629void bnx2x_acquire_phy_lock(struct bnx2x *bp);
630void bnx2x_release_phy_lock(struct bnx2x *bp);
631
632void bnx2x_link_report(struct bnx2x *bp);
633int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
634int bnx2x_tx_int(struct bnx2x_fastpath *fp);
635void bnx2x_init_rx_rings(struct bnx2x *bp);
636netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
637
638int bnx2x_change_mac_addr(struct net_device *dev, void *p);
639void bnx2x_tx_timeout(struct net_device *dev);
640void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
641void bnx2x_netif_start(struct bnx2x *bp);
642void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
643void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
644int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
645int bnx2x_resume(struct pci_dev *pdev);
646void bnx2x_free_skbs(struct bnx2x *bp);
647int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
648int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
649int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
650int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
651
652#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 0beaefb7a160..0c00e50787f9 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -56,6 +56,7 @@
56#include "bnx2x_init.h" 56#include "bnx2x_init.h"
57#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
58#include "bnx2x_dump.h" 58#include "bnx2x_dump.h"
59#include "bnx2x_cmn.h"
59 60
60#define DRV_MODULE_VERSION "1.52.53-1" 61#define DRV_MODULE_VERSION "1.52.53-1"
61#define DRV_MODULE_RELDATE "2010/18/04" 62#define DRV_MODULE_RELDATE "2010/18/04"
@@ -652,7 +653,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
652 BNX2X_ERR("end crash dump -----------------\n"); 653 BNX2X_ERR("end crash dump -----------------\n");
653} 654}
654 655
655static void bnx2x_int_enable(struct bnx2x *bp) 656void bnx2x_int_enable(struct bnx2x *bp)
656{ 657{
657 int port = BP_PORT(bp); 658 int port = BP_PORT(bp);
658 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 659 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -734,7 +735,7 @@ static void bnx2x_int_disable(struct bnx2x *bp)
734 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 735 BNX2X_ERR("BUG! proper val not read from IGU!\n");
735} 736}
736 737
737static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 738void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
738{ 739{
739 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 740 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
740 int i, offset; 741 int i, offset;
@@ -804,235 +805,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
804 return false; 805 return false;
805} 806}
806 807
807static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
808 u8 storm, u16 index, u8 op, u8 update)
809{
810 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
811 COMMAND_REG_INT_ACK);
812 struct igu_ack_register igu_ack;
813
814 igu_ack.status_block_index = index;
815 igu_ack.sb_id_and_flags =
816 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
817 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
818 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
819 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
820
821 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
822 (*(u32 *)&igu_ack), hc_addr);
823 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
824
825 /* Make sure that ACK is written */
826 mmiowb();
827 barrier();
828}
829
830static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
831{
832 struct host_status_block *fpsb = fp->status_blk;
833
834 barrier(); /* status block is written to by the chip */
835 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
836 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
837}
838
839static u16 bnx2x_ack_int(struct bnx2x *bp)
840{
841 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
842 COMMAND_REG_SIMD_MASK);
843 u32 result = REG_RD(bp, hc_addr);
844
845 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
846 result, hc_addr);
847
848 return result;
849}
850
851
852/*
853 * fast path service functions
854 */
855
856static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
857{
858 /* Tell compiler that consumer and producer can change */
859 barrier();
860 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
861}
862
863/* free skb in the packet ring at pos idx
864 * return idx of last bd freed
865 */
866static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
867 u16 idx)
868{
869 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
870 struct eth_tx_start_bd *tx_start_bd;
871 struct eth_tx_bd *tx_data_bd;
872 struct sk_buff *skb = tx_buf->skb;
873 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
874 int nbd;
875
876 /* prefetch skb end pointer to speedup dev_kfree_skb() */
877 prefetch(&skb->end);
878
879 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
880 idx, tx_buf, skb);
881
882 /* unmap first bd */
883 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
884 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
885 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
886 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
887
888 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
889#ifdef BNX2X_STOP_ON_ERROR
890 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
891 BNX2X_ERR("BAD nbd!\n");
892 bnx2x_panic();
893 }
894#endif
895 new_cons = nbd + tx_buf->first_bd;
896
897 /* Get the next bd */
898 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
899
900 /* Skip a parse bd... */
901 --nbd;
902 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
903
904 /* ...and the TSO split header bd since they have no mapping */
905 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
906 --nbd;
907 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
908 }
909
910 /* now free frags */
911 while (nbd > 0) {
912
913 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
914 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
915 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
916 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
917 if (--nbd)
918 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
919 }
920
921 /* release skb */
922 WARN_ON(!skb);
923 dev_kfree_skb(skb);
924 tx_buf->first_bd = 0;
925 tx_buf->skb = NULL;
926
927 return new_cons;
928}
929
930static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
931{
932 s16 used;
933 u16 prod;
934 u16 cons;
935
936 prod = fp->tx_bd_prod;
937 cons = fp->tx_bd_cons;
938
939 /* NUM_TX_RINGS = number of "next-page" entries
940 It will be used as a threshold */
941 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
942
943#ifdef BNX2X_STOP_ON_ERROR
944 WARN_ON(used < 0);
945 WARN_ON(used > fp->bp->tx_ring_size);
946 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
947#endif
948
949 return (s16)(fp->bp->tx_ring_size) - used;
950}
951
952static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
953{
954 u16 hw_cons;
955
956 /* Tell compiler that status block fields can change */
957 barrier();
958 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
959 return hw_cons != fp->tx_pkt_cons;
960}
961
962static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
963{
964 struct bnx2x *bp = fp->bp;
965 struct netdev_queue *txq;
966 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
967
968#ifdef BNX2X_STOP_ON_ERROR
969 if (unlikely(bp->panic))
970 return -1;
971#endif
972
973 txq = netdev_get_tx_queue(bp->dev, fp->index);
974 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
975 sw_cons = fp->tx_pkt_cons;
976
977 while (sw_cons != hw_cons) {
978 u16 pkt_cons;
979
980 pkt_cons = TX_BD(sw_cons);
981
982 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
983
984 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
985 hw_cons, sw_cons, pkt_cons);
986
987/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
988 rmb();
989 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
990 }
991*/
992 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
993 sw_cons++;
994 }
995
996 fp->tx_pkt_cons = sw_cons;
997 fp->tx_bd_cons = bd_cons;
998
999 /* Need to make the tx_bd_cons update visible to start_xmit()
1000 * before checking for netif_tx_queue_stopped(). Without the
1001 * memory barrier, there is a small possibility that
1002 * start_xmit() will miss it and cause the queue to be stopped
1003 * forever.
1004 */
1005 smp_mb();
1006
1007 /* TBD need a thresh? */
1008 if (unlikely(netif_tx_queue_stopped(txq))) {
1009 /* Taking tx_lock() is needed to prevent reenabling the queue
1010 * while it's empty. This could have happened if rx_action() gets
1011 * suspended in bnx2x_tx_int() after the condition before
1012 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1013 *
1014 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1015 * sends some packets consuming the whole queue again->
1016 * stops the queue
1017 */
1018
1019 __netif_tx_lock(txq, smp_processor_id());
1020
1021 if ((netif_tx_queue_stopped(txq)) &&
1022 (bp->state == BNX2X_STATE_OPEN) &&
1023 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1024 netif_tx_wake_queue(txq);
1025
1026 __netif_tx_unlock(txq);
1027 }
1028 return 0;
1029}
1030 808
1031#ifdef BCM_CNIC 809#ifdef BCM_CNIC
1032static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); 810static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1033#endif 811#endif
1034 812
1035static void bnx2x_sp_event(struct bnx2x_fastpath *fp, 813void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1036 union eth_rx_cqe *rr_cqe) 814 union eth_rx_cqe *rr_cqe)
1037{ 815{
1038 struct bnx2x *bp = fp->bp; 816 struct bnx2x *bp = fp->bp;
@@ -1116,717 +894,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1116 mb(); /* force bnx2x_wait_ramrod() to see the change */ 894 mb(); /* force bnx2x_wait_ramrod() to see the change */
1117} 895}
1118 896
1119static inline void bnx2x_free_rx_sge(struct bnx2x *bp, 897irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1120 struct bnx2x_fastpath *fp, u16 index)
1121{
1122 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1123 struct page *page = sw_buf->page;
1124 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1125
1126 /* Skip "next page" elements */
1127 if (!page)
1128 return;
1129
1130 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1131 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1132 __free_pages(page, PAGES_PER_SGE_SHIFT);
1133
1134 sw_buf->page = NULL;
1135 sge->addr_hi = 0;
1136 sge->addr_lo = 0;
1137}
1138
1139static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1140 struct bnx2x_fastpath *fp, int last)
1141{
1142 int i;
1143
1144 for (i = 0; i < last; i++)
1145 bnx2x_free_rx_sge(bp, fp, i);
1146}
1147
1148static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1149 struct bnx2x_fastpath *fp, u16 index)
1150{
1151 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1152 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1153 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1154 dma_addr_t mapping;
1155
1156 if (unlikely(page == NULL))
1157 return -ENOMEM;
1158
1159 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1160 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1161 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1162 __free_pages(page, PAGES_PER_SGE_SHIFT);
1163 return -ENOMEM;
1164 }
1165
1166 sw_buf->page = page;
1167 dma_unmap_addr_set(sw_buf, mapping, mapping);
1168
1169 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1170 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1171
1172 return 0;
1173}
1174
1175static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1176 struct bnx2x_fastpath *fp, u16 index)
1177{
1178 struct sk_buff *skb;
1179 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1180 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1181 dma_addr_t mapping;
1182
1183 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1184 if (unlikely(skb == NULL))
1185 return -ENOMEM;
1186
1187 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1188 DMA_FROM_DEVICE);
1189 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1190 dev_kfree_skb(skb);
1191 return -ENOMEM;
1192 }
1193
1194 rx_buf->skb = skb;
1195 dma_unmap_addr_set(rx_buf, mapping, mapping);
1196
1197 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1198 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1199
1200 return 0;
1201}
1202
1203/* note that we are not allocating a new skb,
1204 * we are just moving one from cons to prod
1205 * we are not creating a new mapping,
1206 * so there is no need to check for dma_mapping_error().
1207 */
1208static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1209 struct sk_buff *skb, u16 cons, u16 prod)
1210{
1211 struct bnx2x *bp = fp->bp;
1212 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1213 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1214 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1215 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1216
1217 dma_sync_single_for_device(&bp->pdev->dev,
1218 dma_unmap_addr(cons_rx_buf, mapping),
1219 RX_COPY_THRESH, DMA_FROM_DEVICE);
1220
1221 prod_rx_buf->skb = cons_rx_buf->skb;
1222 dma_unmap_addr_set(prod_rx_buf, mapping,
1223 dma_unmap_addr(cons_rx_buf, mapping));
1224 *prod_bd = *cons_bd;
1225}
1226
1227static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1228 u16 idx)
1229{
1230 u16 last_max = fp->last_max_sge;
1231
1232 if (SUB_S16(idx, last_max) > 0)
1233 fp->last_max_sge = idx;
1234}
1235
1236static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1237{
1238 int i, j;
1239
1240 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1241 int idx = RX_SGE_CNT * i - 1;
1242
1243 for (j = 0; j < 2; j++) {
1244 SGE_MASK_CLEAR_BIT(fp, idx);
1245 idx--;
1246 }
1247 }
1248}
1249
1250static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1251 struct eth_fast_path_rx_cqe *fp_cqe)
1252{
1253 struct bnx2x *bp = fp->bp;
1254 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1255 le16_to_cpu(fp_cqe->len_on_bd)) >>
1256 SGE_PAGE_SHIFT;
1257 u16 last_max, last_elem, first_elem;
1258 u16 delta = 0;
1259 u16 i;
1260
1261 if (!sge_len)
1262 return;
1263
1264 /* First mark all used pages */
1265 for (i = 0; i < sge_len; i++)
1266 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1267
1268 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1269 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1270
1271 /* Here we assume that the last SGE index is the biggest */
1272 prefetch((void *)(fp->sge_mask));
1273 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1274
1275 last_max = RX_SGE(fp->last_max_sge);
1276 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1277 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1278
1279 /* If ring is not full */
1280 if (last_elem + 1 != first_elem)
1281 last_elem++;
1282
1283 /* Now update the prod */
1284 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1285 if (likely(fp->sge_mask[i]))
1286 break;
1287
1288 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1289 delta += RX_SGE_MASK_ELEM_SZ;
1290 }
1291
1292 if (delta > 0) {
1293 fp->rx_sge_prod += delta;
1294 /* clear page-end entries */
1295 bnx2x_clear_sge_mask_next_elems(fp);
1296 }
1297
1298 DP(NETIF_MSG_RX_STATUS,
1299 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1300 fp->last_max_sge, fp->rx_sge_prod);
1301}
1302
1303static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1304{
1305 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1306 memset(fp->sge_mask, 0xff,
1307 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1308
1309 /* Clear the two last indices in the page to 1:
1310 these are the indices that correspond to the "next" element,
1311 hence will never be indicated and should be removed from
1312 the calculations. */
1313 bnx2x_clear_sge_mask_next_elems(fp);
1314}
1315
1316static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1317 struct sk_buff *skb, u16 cons, u16 prod)
1318{
1319 struct bnx2x *bp = fp->bp;
1320 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1321 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1322 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1323 dma_addr_t mapping;
1324
1325 /* move empty skb from pool to prod and map it */
1326 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1327 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1328 bp->rx_buf_size, DMA_FROM_DEVICE);
1329 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1330
1331 /* move partial skb from cons to pool (don't unmap yet) */
1332 fp->tpa_pool[queue] = *cons_rx_buf;
1333
1334 /* mark bin state as start - print error if current state != stop */
1335 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1336 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1337
1338 fp->tpa_state[queue] = BNX2X_TPA_START;
1339
1340 /* point prod_bd to new skb */
1341 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1342 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1343
1344#ifdef BNX2X_STOP_ON_ERROR
1345 fp->tpa_queue_used |= (1 << queue);
1346#ifdef _ASM_GENERIC_INT_L64_H
1347 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1348#else
1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1350#endif
1351 fp->tpa_queue_used);
1352#endif
1353}
1354
1355static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1356 struct sk_buff *skb,
1357 struct eth_fast_path_rx_cqe *fp_cqe,
1358 u16 cqe_idx)
1359{
1360 struct sw_rx_page *rx_pg, old_rx_pg;
1361 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1362 u32 i, frag_len, frag_size, pages;
1363 int err;
1364 int j;
1365
1366 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1367 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1368
1369 /* This is needed in order to enable forwarding support */
1370 if (frag_size)
1371 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1372 max(frag_size, (u32)len_on_bd));
1373
1374#ifdef BNX2X_STOP_ON_ERROR
1375 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1376 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1377 pages, cqe_idx);
1378 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1379 fp_cqe->pkt_len, len_on_bd);
1380 bnx2x_panic();
1381 return -EINVAL;
1382 }
1383#endif
1384
1385 /* Run through the SGL and compose the fragmented skb */
1386 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1387 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1388
1389 /* FW gives the indices of the SGE as if the ring is an array
1390 (meaning that "next" element will consume 2 indices) */
1391 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1392 rx_pg = &fp->rx_page_ring[sge_idx];
1393 old_rx_pg = *rx_pg;
1394
1395 /* If we fail to allocate a substitute page, we simply stop
1396 where we are and drop the whole packet */
1397 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1398 if (unlikely(err)) {
1399 fp->eth_q_stats.rx_skb_alloc_failed++;
1400 return err;
1401 }
1402
1403 /* Unmap the page as we are going to pass it to the stack */
1404 dma_unmap_page(&bp->pdev->dev,
1405 dma_unmap_addr(&old_rx_pg, mapping),
1406 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1407
1408 /* Add one frag and update the appropriate fields in the skb */
1409 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1410
1411 skb->data_len += frag_len;
1412 skb->truesize += frag_len;
1413 skb->len += frag_len;
1414
1415 frag_size -= frag_len;
1416 }
1417
1418 return 0;
1419}
1420
1421static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1422 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1423 u16 cqe_idx)
1424{
1425 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1426 struct sk_buff *skb = rx_buf->skb;
1427 /* alloc new skb */
1428 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1429
1430 /* Unmap skb in the pool anyway, as we are going to change
1431 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1432 fails. */
1433 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1434 bp->rx_buf_size, DMA_FROM_DEVICE);
1435
1436 if (likely(new_skb)) {
1437 /* fix ip xsum and give it to the stack */
1438 /* (no need to map the new skb) */
1439#ifdef BCM_VLAN
1440 int is_vlan_cqe =
1441 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1442 PARSING_FLAGS_VLAN);
1443 int is_not_hwaccel_vlan_cqe =
1444 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1445#endif
1446
1447 prefetch(skb);
1448 prefetch(((char *)(skb)) + 128);
1449
1450#ifdef BNX2X_STOP_ON_ERROR
1451 if (pad + len > bp->rx_buf_size) {
1452 BNX2X_ERR("skb_put is about to fail... "
1453 "pad %d len %d rx_buf_size %d\n",
1454 pad, len, bp->rx_buf_size);
1455 bnx2x_panic();
1456 return;
1457 }
1458#endif
1459
1460 skb_reserve(skb, pad);
1461 skb_put(skb, len);
1462
1463 skb->protocol = eth_type_trans(skb, bp->dev);
1464 skb->ip_summed = CHECKSUM_UNNECESSARY;
1465
1466 {
1467 struct iphdr *iph;
1468
1469 iph = (struct iphdr *)skb->data;
1470#ifdef BCM_VLAN
1471 /* If there is no Rx VLAN offloading -
1472 take VLAN tag into an account */
1473 if (unlikely(is_not_hwaccel_vlan_cqe))
1474 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1475#endif
1476 iph->check = 0;
1477 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1478 }
1479
1480 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1481 &cqe->fast_path_cqe, cqe_idx)) {
1482#ifdef BCM_VLAN
1483 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1484 (!is_not_hwaccel_vlan_cqe))
1485 vlan_gro_receive(&fp->napi, bp->vlgrp,
1486 le16_to_cpu(cqe->fast_path_cqe.
1487 vlan_tag), skb);
1488 else
1489#endif
1490 napi_gro_receive(&fp->napi, skb);
1491 } else {
1492 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1493 " - dropping packet!\n");
1494 dev_kfree_skb(skb);
1495 }
1496
1497
1498 /* put new skb in bin */
1499 fp->tpa_pool[queue].skb = new_skb;
1500
1501 } else {
1502 /* else drop the packet and keep the buffer in the bin */
1503 DP(NETIF_MSG_RX_STATUS,
1504 "Failed to allocate new skb - dropping packet!\n");
1505 fp->eth_q_stats.rx_skb_alloc_failed++;
1506 }
1507
1508 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1509}
1510
1511static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1512 struct bnx2x_fastpath *fp,
1513 u16 bd_prod, u16 rx_comp_prod,
1514 u16 rx_sge_prod)
1515{
1516 struct ustorm_eth_rx_producers rx_prods = {0};
1517 int i;
1518
1519 /* Update producers */
1520 rx_prods.bd_prod = bd_prod;
1521 rx_prods.cqe_prod = rx_comp_prod;
1522 rx_prods.sge_prod = rx_sge_prod;
1523
1524 /*
1525 * Make sure that the BD and SGE data is updated before updating the
1526 * producers since FW might read the BD/SGE right after the producer
1527 * is updated.
1528 * This is only applicable for weak-ordered memory model archs such
1529 * as IA-64. The following barrier is also mandatory since the FW
1530 * assumes BDs must have buffers.
1531 */
1532 wmb();
1533
1534 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1535 REG_WR(bp, BAR_USTRORM_INTMEM +
1536 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1537 ((u32 *)&rx_prods)[i]);
1538
1539 mmiowb(); /* keep prod updates ordered */
1540
1541 DP(NETIF_MSG_RX_STATUS,
1542 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1543 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1544}
1545
1546/* Set Toeplitz hash value in the skb using the value from the
1547 * CQE (calculated by HW).
1548 */
1549static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
1550 struct sk_buff *skb)
1551{
1552 /* Set Toeplitz hash from CQE */
1553 if ((bp->dev->features & NETIF_F_RXHASH) &&
1554 (cqe->fast_path_cqe.status_flags &
1555 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1556 skb->rxhash =
1557 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
1558}
1559
1560static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1561{
1562 struct bnx2x *bp = fp->bp;
1563 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1564 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1565 int rx_pkt = 0;
1566
1567#ifdef BNX2X_STOP_ON_ERROR
1568 if (unlikely(bp->panic))
1569 return 0;
1570#endif
1571
1572 /* CQ "next element" is of the size of the regular element,
1573 that's why it's ok here */
1574 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1575 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1576 hw_comp_cons++;
1577
1578 bd_cons = fp->rx_bd_cons;
1579 bd_prod = fp->rx_bd_prod;
1580 bd_prod_fw = bd_prod;
1581 sw_comp_cons = fp->rx_comp_cons;
1582 sw_comp_prod = fp->rx_comp_prod;
1583
1584 /* Memory barrier necessary as speculative reads of the rx
1585 * buffer can be ahead of the index in the status block
1586 */
1587 rmb();
1588
1589 DP(NETIF_MSG_RX_STATUS,
1590 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1591 fp->index, hw_comp_cons, sw_comp_cons);
1592
1593 while (sw_comp_cons != hw_comp_cons) {
1594 struct sw_rx_bd *rx_buf = NULL;
1595 struct sk_buff *skb;
1596 union eth_rx_cqe *cqe;
1597 u8 cqe_fp_flags;
1598 u16 len, pad;
1599
1600 comp_ring_cons = RCQ_BD(sw_comp_cons);
1601 bd_prod = RX_BD(bd_prod);
1602 bd_cons = RX_BD(bd_cons);
1603
1604 /* Prefetch the page containing the BD descriptor
1605 at producer's index. It will be needed when new skb is
1606 allocated */
1607 prefetch((void *)(PAGE_ALIGN((unsigned long)
1608 (&fp->rx_desc_ring[bd_prod])) -
1609 PAGE_SIZE + 1));
1610
1611 cqe = &fp->rx_comp_ring[comp_ring_cons];
1612 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1613
1614 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1615 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1616 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1617 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1618 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1619 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1620
1621 /* is this a slowpath msg? */
1622 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1623 bnx2x_sp_event(fp, cqe);
1624 goto next_cqe;
1625
1626 /* this is an rx packet */
1627 } else {
1628 rx_buf = &fp->rx_buf_ring[bd_cons];
1629 skb = rx_buf->skb;
1630 prefetch(skb);
1631 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1632 pad = cqe->fast_path_cqe.placement_offset;
1633
1634 /* If CQE is marked both TPA_START and TPA_END
1635 it is a non-TPA CQE */
1636 if ((!fp->disable_tpa) &&
1637 (TPA_TYPE(cqe_fp_flags) !=
1638 (TPA_TYPE_START | TPA_TYPE_END))) {
1639 u16 queue = cqe->fast_path_cqe.queue_index;
1640
1641 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1642 DP(NETIF_MSG_RX_STATUS,
1643 "calling tpa_start on queue %d\n",
1644 queue);
1645
1646 bnx2x_tpa_start(fp, queue, skb,
1647 bd_cons, bd_prod);
1648
1649 /* Set Toeplitz hash for an LRO skb */
1650 bnx2x_set_skb_rxhash(bp, cqe, skb);
1651
1652 goto next_rx;
1653 }
1654
1655 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1656 DP(NETIF_MSG_RX_STATUS,
1657 "calling tpa_stop on queue %d\n",
1658 queue);
1659
1660 if (!BNX2X_RX_SUM_FIX(cqe))
1661 BNX2X_ERR("STOP on none TCP "
1662 "data\n");
1663
1664 /* This is a size of the linear data
1665 on this skb */
1666 len = le16_to_cpu(cqe->fast_path_cqe.
1667 len_on_bd);
1668 bnx2x_tpa_stop(bp, fp, queue, pad,
1669 len, cqe, comp_ring_cons);
1670#ifdef BNX2X_STOP_ON_ERROR
1671 if (bp->panic)
1672 return 0;
1673#endif
1674
1675 bnx2x_update_sge_prod(fp,
1676 &cqe->fast_path_cqe);
1677 goto next_cqe;
1678 }
1679 }
1680
1681 dma_sync_single_for_device(&bp->pdev->dev,
1682 dma_unmap_addr(rx_buf, mapping),
1683 pad + RX_COPY_THRESH,
1684 DMA_FROM_DEVICE);
1685 prefetch(((char *)(skb)) + 128);
1686
1687 /* is this an error packet? */
1688 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1689 DP(NETIF_MSG_RX_ERR,
1690 "ERROR flags %x rx packet %u\n",
1691 cqe_fp_flags, sw_comp_cons);
1692 fp->eth_q_stats.rx_err_discard_pkt++;
1693 goto reuse_rx;
1694 }
1695
1696 /* Since we don't have a jumbo ring
1697 * copy small packets if mtu > 1500
1698 */
1699 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1700 (len <= RX_COPY_THRESH)) {
1701 struct sk_buff *new_skb;
1702
1703 new_skb = netdev_alloc_skb(bp->dev,
1704 len + pad);
1705 if (new_skb == NULL) {
1706 DP(NETIF_MSG_RX_ERR,
1707 "ERROR packet dropped "
1708 "because of alloc failure\n");
1709 fp->eth_q_stats.rx_skb_alloc_failed++;
1710 goto reuse_rx;
1711 }
1712
1713 /* aligned copy */
1714 skb_copy_from_linear_data_offset(skb, pad,
1715 new_skb->data + pad, len);
1716 skb_reserve(new_skb, pad);
1717 skb_put(new_skb, len);
1718
1719 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1720
1721 skb = new_skb;
1722
1723 } else
1724 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1725 dma_unmap_single(&bp->pdev->dev,
1726 dma_unmap_addr(rx_buf, mapping),
1727 bp->rx_buf_size,
1728 DMA_FROM_DEVICE);
1729 skb_reserve(skb, pad);
1730 skb_put(skb, len);
1731
1732 } else {
1733 DP(NETIF_MSG_RX_ERR,
1734 "ERROR packet dropped because "
1735 "of alloc failure\n");
1736 fp->eth_q_stats.rx_skb_alloc_failed++;
1737reuse_rx:
1738 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1739 goto next_rx;
1740 }
1741
1742 skb->protocol = eth_type_trans(skb, bp->dev);
1743
1744 /* Set Toeplitz hash for a non-LRO skb */
1745 bnx2x_set_skb_rxhash(bp, cqe, skb);
1746
1747 skb->ip_summed = CHECKSUM_NONE;
1748 if (bp->rx_csum) {
1749 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1750 skb->ip_summed = CHECKSUM_UNNECESSARY;
1751 else
1752 fp->eth_q_stats.hw_csum_err++;
1753 }
1754 }
1755
1756 skb_record_rx_queue(skb, fp->index);
1757
1758#ifdef BCM_VLAN
1759 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1760 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1761 PARSING_FLAGS_VLAN))
1762 vlan_gro_receive(&fp->napi, bp->vlgrp,
1763 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1764 else
1765#endif
1766 napi_gro_receive(&fp->napi, skb);
1767
1768
1769next_rx:
1770 rx_buf->skb = NULL;
1771
1772 bd_cons = NEXT_RX_IDX(bd_cons);
1773 bd_prod = NEXT_RX_IDX(bd_prod);
1774 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1775 rx_pkt++;
1776next_cqe:
1777 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1778 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1779
1780 if (rx_pkt == budget)
1781 break;
1782 } /* while */
1783
1784 fp->rx_bd_cons = bd_cons;
1785 fp->rx_bd_prod = bd_prod_fw;
1786 fp->rx_comp_cons = sw_comp_cons;
1787 fp->rx_comp_prod = sw_comp_prod;
1788
1789 /* Update producers */
1790 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1791 fp->rx_sge_prod);
1792
1793 fp->rx_pkt += rx_pkt;
1794 fp->rx_calls++;
1795
1796 return rx_pkt;
1797}
1798
1799static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1800{
1801 struct bnx2x_fastpath *fp = fp_cookie;
1802 struct bnx2x *bp = fp->bp;
1803
1804 /* Return here if interrupt is disabled */
1805 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1806 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1807 return IRQ_HANDLED;
1808 }
1809
1810 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1811 fp->index, fp->sb_id);
1812 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1813
1814#ifdef BNX2X_STOP_ON_ERROR
1815 if (unlikely(bp->panic))
1816 return IRQ_HANDLED;
1817#endif
1818
1819 /* Handle Rx and Tx according to MSI-X vector */
1820 prefetch(fp->rx_cons_sb);
1821 prefetch(fp->tx_cons_sb);
1822 prefetch(&fp->status_blk->u_status_block.status_block_index);
1823 prefetch(&fp->status_blk->c_status_block.status_block_index);
1824 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1825
1826 return IRQ_HANDLED;
1827}
1828
1829static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1830{ 898{
1831 struct bnx2x *bp = netdev_priv(dev_instance); 899 struct bnx2x *bp = netdev_priv(dev_instance);
1832 u16 status = bnx2x_ack_int(bp); 900 u16 status = bnx2x_ack_int(bp);
@@ -1900,7 +968,6 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1900 968
1901/* end of fast path */ 969/* end of fast path */
1902 970
1903static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1904 971
1905/* Link */ 972/* Link */
1906 973
@@ -1908,7 +975,7 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1908 * General service functions 975 * General service functions
1909 */ 976 */
1910 977
1911static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) 978int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1912{ 979{
1913 u32 lock_status; 980 u32 lock_status;
1914 u32 resource_bit = (1 << resource); 981 u32 resource_bit = (1 << resource);
@@ -1953,7 +1020,7 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1953 return -EAGAIN; 1020 return -EAGAIN;
1954} 1021}
1955 1022
1956static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) 1023int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1957{ 1024{
1958 u32 lock_status; 1025 u32 lock_status;
1959 u32 resource_bit = (1 << resource); 1026 u32 resource_bit = (1 << resource);
@@ -1989,22 +1056,6 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1989 return 0; 1056 return 0;
1990} 1057}
1991 1058
1992/* HW Lock for shared dual port PHYs */
1993static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1994{
1995 mutex_lock(&bp->port.phy_mutex);
1996
1997 if (bp->port.need_hw_lock)
1998 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1999}
2000
2001static void bnx2x_release_phy_lock(struct bnx2x *bp)
2002{
2003 if (bp->port.need_hw_lock)
2004 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
2005
2006 mutex_unlock(&bp->port.phy_mutex);
2007}
2008 1059
2009int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) 1060int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2010{ 1061{
@@ -2181,7 +1232,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2181 return 0; 1232 return 0;
2182} 1233}
2183 1234
2184static void bnx2x_calc_fc_adv(struct bnx2x *bp) 1235void bnx2x_calc_fc_adv(struct bnx2x *bp)
2185{ 1236{
2186 switch (bp->link_vars.ieee_fc & 1237 switch (bp->link_vars.ieee_fc &
2187 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 1238 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
@@ -2206,58 +1257,8 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2206 } 1257 }
2207} 1258}
2208 1259
2209static void bnx2x_link_report(struct bnx2x *bp)
2210{
2211 if (bp->flags & MF_FUNC_DIS) {
2212 netif_carrier_off(bp->dev);
2213 netdev_err(bp->dev, "NIC Link is Down\n");
2214 return;
2215 }
2216
2217 if (bp->link_vars.link_up) {
2218 u16 line_speed;
2219
2220 if (bp->state == BNX2X_STATE_OPEN)
2221 netif_carrier_on(bp->dev);
2222 netdev_info(bp->dev, "NIC Link is Up, ");
2223
2224 line_speed = bp->link_vars.line_speed;
2225 if (IS_E1HMF(bp)) {
2226 u16 vn_max_rate;
2227
2228 vn_max_rate =
2229 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2230 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2231 if (vn_max_rate < line_speed)
2232 line_speed = vn_max_rate;
2233 }
2234 pr_cont("%d Mbps ", line_speed);
2235
2236 if (bp->link_vars.duplex == DUPLEX_FULL)
2237 pr_cont("full duplex");
2238 else
2239 pr_cont("half duplex");
2240
2241 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2242 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2243 pr_cont(", receive ");
2244 if (bp->link_vars.flow_ctrl &
2245 BNX2X_FLOW_CTRL_TX)
2246 pr_cont("& transmit ");
2247 } else {
2248 pr_cont(", transmit ");
2249 }
2250 pr_cont("flow control ON");
2251 }
2252 pr_cont("\n");
2253
2254 } else { /* link_down */
2255 netif_carrier_off(bp->dev);
2256 netdev_err(bp->dev, "NIC Link is Down\n");
2257 }
2258}
2259 1260
2260static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 1261u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2261{ 1262{
2262 if (!BP_NOMCP(bp)) { 1263 if (!BP_NOMCP(bp)) {
2263 u8 rc; 1264 u8 rc;
@@ -2292,7 +1293,7 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2292 return -EINVAL; 1293 return -EINVAL;
2293} 1294}
2294 1295
2295static void bnx2x_link_set(struct bnx2x *bp) 1296void bnx2x_link_set(struct bnx2x *bp)
2296{ 1297{
2297 if (!BP_NOMCP(bp)) { 1298 if (!BP_NOMCP(bp)) {
2298 bnx2x_acquire_phy_lock(bp); 1299 bnx2x_acquire_phy_lock(bp);
@@ -2314,7 +1315,7 @@ static void bnx2x__link_reset(struct bnx2x *bp)
2314 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 1315 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2315} 1316}
2316 1317
2317static u8 bnx2x_link_test(struct bnx2x *bp) 1318u8 bnx2x_link_test(struct bnx2x *bp)
2318{ 1319{
2319 u8 rc = 0; 1320 u8 rc = 0;
2320 1321
@@ -2546,7 +1547,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2546 } 1547 }
2547} 1548}
2548 1549
2549static void bnx2x__link_status_update(struct bnx2x *bp) 1550void bnx2x__link_status_update(struct bnx2x *bp)
2550{ 1551{
2551 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) 1552 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2552 return; 1553 return;
@@ -2627,9 +1628,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2627 return rc; 1628 return rc;
2628} 1629}
2629 1630
2630static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2631static void bnx2x_set_rx_mode(struct net_device *dev);
2632
2633static void bnx2x_e1h_disable(struct bnx2x *bp) 1631static void bnx2x_e1h_disable(struct bnx2x *bp)
2634{ 1632{
2635 int port = BP_PORT(bp); 1633 int port = BP_PORT(bp);
@@ -2757,7 +1755,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2757} 1755}
2758 1756
2759/* the slow path queue is odd since completions arrive on the fastpath ring */ 1757/* the slow path queue is odd since completions arrive on the fastpath ring */
2760static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 1758int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2761 u32 data_hi, u32 data_lo, int common) 1759 u32 data_hi, u32 data_lo, int common)
2762{ 1760{
2763 struct eth_spe *spe; 1761 struct eth_spe *spe;
@@ -3169,10 +2167,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3169 } 2167 }
3170} 2168}
3171 2169
3172static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3173static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3174
3175
3176#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 2170#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3177#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ 2171#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3178#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) 2172#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
@@ -3206,7 +2200,7 @@ static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3206/* 2200/*
3207 * should be run under rtnl lock 2201 * should be run under rtnl lock
3208 */ 2202 */
3209static inline bool bnx2x_reset_is_done(struct bnx2x *bp) 2203bool bnx2x_reset_is_done(struct bnx2x *bp)
3210{ 2204{
3211 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); 2205 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3212 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); 2206 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
@@ -3216,7 +2210,7 @@ static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3216/* 2210/*
3217 * should be run under rtnl lock 2211 * should be run under rtnl lock
3218 */ 2212 */
3219static inline void bnx2x_inc_load_cnt(struct bnx2x *bp) 2213inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3220{ 2214{
3221 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); 2215 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3222 2216
@@ -3231,7 +2225,7 @@ static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3231/* 2225/*
3232 * should be run under rtnl lock 2226 * should be run under rtnl lock
3233 */ 2227 */
3234static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp) 2228u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3235{ 2229{
3236 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); 2230 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3237 2231
@@ -3449,7 +2443,7 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3449 return false; 2443 return false;
3450} 2444}
3451 2445
3452static bool bnx2x_chk_parity_attn(struct bnx2x *bp) 2446bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3453{ 2447{
3454 struct attn_route attn; 2448 struct attn_route attn;
3455 int port = BP_PORT(bp); 2449 int port = BP_PORT(bp);
@@ -3627,7 +2621,7 @@ static void bnx2x_sp_task(struct work_struct *work)
3627 IGU_INT_ENABLE, 1); 2621 IGU_INT_ENABLE, 1);
3628} 2622}
3629 2623
3630static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 2624irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3631{ 2625{
3632 struct net_device *dev = dev_instance; 2626 struct net_device *dev = dev_instance;
3633 struct bnx2x *bp = netdev_priv(dev); 2627 struct bnx2x *bp = netdev_priv(dev);
@@ -4859,7 +3853,7 @@ static const struct {
4859} 3853}
4860}; 3854};
4861 3855
4862static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) 3856void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4863{ 3857{
4864 enum bnx2x_stats_state state = bp->stats_state; 3858 enum bnx2x_stats_state state = bp->stats_state;
4865 3859
@@ -5114,7 +4108,7 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5114 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4); 4108 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5115} 4109}
5116 4110
5117static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 4111void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5118 dma_addr_t mapping, int sb_id) 4112 dma_addr_t mapping, int sb_id)
5119{ 4113{
5120 int port = BP_PORT(bp); 4114 int port = BP_PORT(bp);
@@ -5293,7 +4287,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
5293 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 4287 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5294} 4288}
5295 4289
5296static void bnx2x_update_coalesce(struct bnx2x *bp) 4290void bnx2x_update_coalesce(struct bnx2x *bp)
5297{ 4291{
5298 int port = BP_PORT(bp); 4292 int port = BP_PORT(bp);
5299 int i; 4293 int i;
@@ -5323,207 +4317,6 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
5323 } 4317 }
5324} 4318}
5325 4319
5326static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5327 struct bnx2x_fastpath *fp, int last)
5328{
5329 int i;
5330
5331 for (i = 0; i < last; i++) {
5332 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5333 struct sk_buff *skb = rx_buf->skb;
5334
5335 if (skb == NULL) {
5336 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5337 continue;
5338 }
5339
5340 if (fp->tpa_state[i] == BNX2X_TPA_START)
5341 dma_unmap_single(&bp->pdev->dev,
5342 dma_unmap_addr(rx_buf, mapping),
5343 bp->rx_buf_size, DMA_FROM_DEVICE);
5344
5345 dev_kfree_skb(skb);
5346 rx_buf->skb = NULL;
5347 }
5348}
5349
5350static void bnx2x_init_rx_rings(struct bnx2x *bp)
5351{
5352 int func = BP_FUNC(bp);
5353 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5354 ETH_MAX_AGGREGATION_QUEUES_E1H;
5355 u16 ring_prod, cqe_ring_prod;
5356 int i, j;
5357
5358 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5359 DP(NETIF_MSG_IFUP,
5360 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5361
5362 if (bp->flags & TPA_ENABLE_FLAG) {
5363
5364 for_each_queue(bp, j) {
5365 struct bnx2x_fastpath *fp = &bp->fp[j];
5366
5367 for (i = 0; i < max_agg_queues; i++) {
5368 fp->tpa_pool[i].skb =
5369 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5370 if (!fp->tpa_pool[i].skb) {
5371 BNX2X_ERR("Failed to allocate TPA "
5372 "skb pool for queue[%d] - "
5373 "disabling TPA on this "
5374 "queue!\n", j);
5375 bnx2x_free_tpa_pool(bp, fp, i);
5376 fp->disable_tpa = 1;
5377 break;
5378 }
5379 dma_unmap_addr_set((struct sw_rx_bd *)
5380 &bp->fp->tpa_pool[i],
5381 mapping, 0);
5382 fp->tpa_state[i] = BNX2X_TPA_STOP;
5383 }
5384 }
5385 }
5386
5387 for_each_queue(bp, j) {
5388 struct bnx2x_fastpath *fp = &bp->fp[j];
5389
5390 fp->rx_bd_cons = 0;
5391 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5392 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5393
5394 /* "next page" elements initialization */
5395 /* SGE ring */
5396 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5397 struct eth_rx_sge *sge;
5398
5399 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5400 sge->addr_hi =
5401 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5402 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5403 sge->addr_lo =
5404 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5405 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5406 }
5407
5408 bnx2x_init_sge_ring_bit_mask(fp);
5409
5410 /* RX BD ring */
5411 for (i = 1; i <= NUM_RX_RINGS; i++) {
5412 struct eth_rx_bd *rx_bd;
5413
5414 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5415 rx_bd->addr_hi =
5416 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5417 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5418 rx_bd->addr_lo =
5419 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5420 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5421 }
5422
5423 /* CQ ring */
5424 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5425 struct eth_rx_cqe_next_page *nextpg;
5426
5427 nextpg = (struct eth_rx_cqe_next_page *)
5428 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5429 nextpg->addr_hi =
5430 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5431 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5432 nextpg->addr_lo =
5433 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5434 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5435 }
5436
5437 /* Allocate SGEs and initialize the ring elements */
5438 for (i = 0, ring_prod = 0;
5439 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5440
5441 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5442 BNX2X_ERR("was only able to allocate "
5443 "%d rx sges\n", i);
5444 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5445 /* Cleanup already allocated elements */
5446 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5447 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5448 fp->disable_tpa = 1;
5449 ring_prod = 0;
5450 break;
5451 }
5452 ring_prod = NEXT_SGE_IDX(ring_prod);
5453 }
5454 fp->rx_sge_prod = ring_prod;
5455
5456 /* Allocate BDs and initialize BD ring */
5457 fp->rx_comp_cons = 0;
5458 cqe_ring_prod = ring_prod = 0;
5459 for (i = 0; i < bp->rx_ring_size; i++) {
5460 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5461 BNX2X_ERR("was only able to allocate "
5462 "%d rx skbs on queue[%d]\n", i, j);
5463 fp->eth_q_stats.rx_skb_alloc_failed++;
5464 break;
5465 }
5466 ring_prod = NEXT_RX_IDX(ring_prod);
5467 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5468 WARN_ON(ring_prod <= i);
5469 }
5470
5471 fp->rx_bd_prod = ring_prod;
5472 /* must not have more available CQEs than BDs */
5473 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5474 cqe_ring_prod);
5475 fp->rx_pkt = fp->rx_calls = 0;
5476
5477 /* Warning!
5478 * this will generate an interrupt (to the TSTORM)
5479 * must only be done after chip is initialized
5480 */
5481 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5482 fp->rx_sge_prod);
5483 if (j != 0)
5484 continue;
5485
5486 REG_WR(bp, BAR_USTRORM_INTMEM +
5487 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5488 U64_LO(fp->rx_comp_mapping));
5489 REG_WR(bp, BAR_USTRORM_INTMEM +
5490 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5491 U64_HI(fp->rx_comp_mapping));
5492 }
5493}
5494
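bnx2x_init_rx_rings() above reserves the last entries of every ring page as "next page" pointers, so the hardware can follow several physically separate pages as one circular ring; each pointer is a 64-bit DMA address split into 32-bit halves (U64_HI/U64_LO) before being stored. A small sketch of that split and chaining, using made-up names (demo_next_ptr, page_dma) rather than the driver's structures:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define NUM_PAGES 4
#define PAGE_SZ   4096u

/* hi/lo split of a 64-bit DMA address, as the driver's U64_HI/U64_LO do */
#define ADDR_HI(x) ((uint32_t)((uint64_t)(x) >> 32))
#define ADDR_LO(x) ((uint32_t)((uint64_t)(x) & 0xffffffffu))

struct demo_next_ptr {
	uint32_t addr_hi;
	uint32_t addr_lo;
};

int main(void)
{
	/* pretend this is the DMA address of the first ring page */
	uint64_t page_dma = 0x00000001f2e40000ull;
	struct demo_next_ptr next[NUM_PAGES];

	/* the last descriptor of page i points at page (i + 1) % NUM_PAGES,
	 * which is what turns separate pages into one circular ring */
	for (int i = 0; i < NUM_PAGES; i++) {
		uint64_t target = page_dma + PAGE_SZ * (uint32_t)((i + 1) % NUM_PAGES);

		next[i].addr_hi = ADDR_HI(target);
		next[i].addr_lo = ADDR_LO(target);
		printf("page %d -> %08" PRIx32 ":%08" PRIx32 "\n",
		       i, next[i].addr_hi, next[i].addr_lo);
	}
	return 0;
}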
5495static void bnx2x_init_tx_ring(struct bnx2x *bp)
5496{
5497 int i, j;
5498
5499 for_each_queue(bp, j) {
5500 struct bnx2x_fastpath *fp = &bp->fp[j];
5501
5502 for (i = 1; i <= NUM_TX_RINGS; i++) {
5503 struct eth_tx_next_bd *tx_next_bd =
5504 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5505
5506 tx_next_bd->addr_hi =
5507 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5508 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5509 tx_next_bd->addr_lo =
5510 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5511 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5512 }
5513
5514 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5515 fp->tx_db.data.zero_fill1 = 0;
5516 fp->tx_db.data.prod = 0;
5517
5518 fp->tx_pkt_prod = 0;
5519 fp->tx_pkt_cons = 0;
5520 fp->tx_bd_prod = 0;
5521 fp->tx_bd_cons = 0;
5522 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5523 fp->tx_pkt = 0;
5524 }
5525}
5526
5527static void bnx2x_init_sp_ring(struct bnx2x *bp) 4320static void bnx2x_init_sp_ring(struct bnx2x *bp)
5528{ 4321{
5529 int func = BP_FUNC(bp); 4322 int func = BP_FUNC(bp);
@@ -5638,7 +4431,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
5638 bp->fp->cl_id + (i % bp->num_queues)); 4431 bp->fp->cl_id + (i % bp->num_queues));
5639} 4432}
5640 4433
5641static void bnx2x_set_client_config(struct bnx2x *bp) 4434void bnx2x_set_client_config(struct bnx2x *bp)
5642{ 4435{
5643 struct tstorm_eth_client_config tstorm_client = {0}; 4436 struct tstorm_eth_client_config tstorm_client = {0};
5644 int port = BP_PORT(bp); 4437 int port = BP_PORT(bp);
@@ -5671,7 +4464,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
5671 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); 4464 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5672} 4465}
5673 4466
5674static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 4467void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5675{ 4468{
5676 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; 4469 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5677 int mode = bp->rx_mode; 4470 int mode = bp->rx_mode;
@@ -5991,7 +4784,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5991 } 4784 }
5992} 4785}
5993 4786
5994static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 4787void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5995{ 4788{
5996 int i; 4789 int i;
5997 4790
@@ -7072,7 +5865,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
7072 return 0; 5865 return 0;
7073} 5866}
7074 5867
7075static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 5868int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7076{ 5869{
7077 int i, rc = 0; 5870 int i, rc = 0;
7078 5871
@@ -7134,7 +5927,7 @@ init_hw_err:
7134 return rc; 5927 return rc;
7135} 5928}
7136 5929
7137static void bnx2x_free_mem(struct bnx2x *bp) 5930void bnx2x_free_mem(struct bnx2x *bp)
7138{ 5931{
7139 5932
7140#define BNX2X_PCI_FREE(x, y, size) \ 5933#define BNX2X_PCI_FREE(x, y, size) \
@@ -7216,7 +6009,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
7216#undef BNX2X_KFREE 6009#undef BNX2X_KFREE
7217} 6010}
7218 6011
7219static int bnx2x_alloc_mem(struct bnx2x *bp) 6012int bnx2x_alloc_mem(struct bnx2x *bp)
7220{ 6013{
7221 6014
7222#define BNX2X_PCI_ALLOC(x, y, size) \ 6015#define BNX2X_PCI_ALLOC(x, y, size) \
@@ -7322,264 +6115,6 @@ alloc_mem_err:
7322#undef BNX2X_ALLOC 6115#undef BNX2X_ALLOC
7323} 6116}
7324 6117
7325static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7326{
7327 int i;
7328
7329 for_each_queue(bp, i) {
7330 struct bnx2x_fastpath *fp = &bp->fp[i];
7331
7332 u16 bd_cons = fp->tx_bd_cons;
7333 u16 sw_prod = fp->tx_pkt_prod;
7334 u16 sw_cons = fp->tx_pkt_cons;
7335
7336 while (sw_cons != sw_prod) {
7337 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7338 sw_cons++;
7339 }
7340 }
7341}
7342
7343static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7344{
7345 int i, j;
7346
7347 for_each_queue(bp, j) {
7348 struct bnx2x_fastpath *fp = &bp->fp[j];
7349
7350 for (i = 0; i < NUM_RX_BD; i++) {
7351 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7352 struct sk_buff *skb = rx_buf->skb;
7353
7354 if (skb == NULL)
7355 continue;
7356
7357 dma_unmap_single(&bp->pdev->dev,
7358 dma_unmap_addr(rx_buf, mapping),
7359 bp->rx_buf_size, DMA_FROM_DEVICE);
7360
7361 rx_buf->skb = NULL;
7362 dev_kfree_skb(skb);
7363 }
7364 if (!fp->disable_tpa)
7365 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7366 ETH_MAX_AGGREGATION_QUEUES_E1 :
7367 ETH_MAX_AGGREGATION_QUEUES_E1H);
7368 }
7369}
7370
7371static void bnx2x_free_skbs(struct bnx2x *bp)
7372{
7373 bnx2x_free_tx_skbs(bp);
7374 bnx2x_free_rx_skbs(bp);
7375}
7376
7377static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7378{
7379 int i, offset = 1;
7380
7381 free_irq(bp->msix_table[0].vector, bp->dev);
7382 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7383 bp->msix_table[0].vector);
7384
7385#ifdef BCM_CNIC
7386 offset++;
7387#endif
7388 for_each_queue(bp, i) {
7389 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
7390 "state %x\n", i, bp->msix_table[i + offset].vector,
7391 bnx2x_fp(bp, i, state));
7392
7393 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7394 }
7395}
7396
7397static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7398{
7399 if (bp->flags & USING_MSIX_FLAG) {
7400 if (!disable_only)
7401 bnx2x_free_msix_irqs(bp);
7402 pci_disable_msix(bp->pdev);
7403 bp->flags &= ~USING_MSIX_FLAG;
7404
7405 } else if (bp->flags & USING_MSI_FLAG) {
7406 if (!disable_only)
7407 free_irq(bp->pdev->irq, bp->dev);
7408 pci_disable_msi(bp->pdev);
7409 bp->flags &= ~USING_MSI_FLAG;
7410
7411 } else if (!disable_only)
7412 free_irq(bp->pdev->irq, bp->dev);
7413}
7414
7415static int bnx2x_enable_msix(struct bnx2x *bp)
7416{
7417 int i, rc, offset = 1;
7418 int igu_vec = 0;
7419
7420 bp->msix_table[0].entry = igu_vec;
7421 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7422
7423#ifdef BCM_CNIC
7424 igu_vec = BP_L_ID(bp) + offset;
7425 bp->msix_table[1].entry = igu_vec;
7426 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7427 offset++;
7428#endif
7429 for_each_queue(bp, i) {
7430 igu_vec = BP_L_ID(bp) + offset + i;
7431 bp->msix_table[i + offset].entry = igu_vec;
7432 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7433 "(fastpath #%u)\n", i + offset, igu_vec, i);
7434 }
7435
7436 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7437 BNX2X_NUM_QUEUES(bp) + offset);
7438
7439 /*
7440 * reconfigure number of tx/rx queues according to available
7441 * MSI-X vectors
7442 */
7443 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7444 /* vectors available for FP */
7445 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7446
7447 DP(NETIF_MSG_IFUP,
7448 "Trying to use less MSI-X vectors: %d\n", rc);
7449
7450 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7451
7452 if (rc) {
7453 DP(NETIF_MSG_IFUP,
7454 "MSI-X is not attainable rc %d\n", rc);
7455 return rc;
7456 }
7457
7458 bp->num_queues = min(bp->num_queues, fp_vec);
7459
7460 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7461 bp->num_queues);
7462 } else if (rc) {
7463 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7464 return rc;
7465 }
7466
7467 bp->flags |= USING_MSIX_FLAG;
7468
7469 return 0;
7470}
7471
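bnx2x_enable_msix() above requests one vector per queue plus a slowpath vector; when the return value reports that only a smaller number is available, it retries with that count and shrinks the queue set, and on a hard failure the caller drops back to MSI or INTx with a single queue. A user-space model of that negotiation is sketched below; try_enable_vectors() is a stand-in with the same return convention (0 = granted, positive = only this many available, negative = failure), not the PCI API.

#include <stdio.h>

#define MIN_VECS 2     /* slowpath plus at least one fastpath vector */
#define SP_VECS  1     /* vectors not used for queues                */

/* Fake backend: pretend only 5 vectors exist on this "platform". */
static int try_enable_vectors(int requested)
{
	int available = 5;

	if (requested <= available)
		return 0;              /* got everything we asked for   */
	return available;              /* otherwise report the maximum  */
}

/* Returns the number of queues actually usable, or -1 on failure. */
static int setup_vectors(int wanted_queues)
{
	int rc = try_enable_vectors(wanted_queues + SP_VECS);

	if (rc == 0)
		return wanted_queues;

	if (rc >= MIN_VECS) {
		/* retry with exactly what the platform reported */
		if (try_enable_vectors(rc) == 0)
			return rc - SP_VECS;   /* fewer queues than wanted */
	}
	return -1;                             /* caller falls back to MSI/INTx */
}

int main(void)
{
	printf("asked for 8 queues, got %d\n", setup_vectors(8));
	printf("asked for 2 queues, got %d\n", setup_vectors(2));
	return 0;
}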
7472static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7473{
7474 int i, rc, offset = 1;
7475
7476 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7477 bp->dev->name, bp->dev);
7478 if (rc) {
7479 BNX2X_ERR("request sp irq failed\n");
7480 return -EBUSY;
7481 }
7482
7483#ifdef BCM_CNIC
7484 offset++;
7485#endif
7486 for_each_queue(bp, i) {
7487 struct bnx2x_fastpath *fp = &bp->fp[i];
7488 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7489 bp->dev->name, i);
7490
7491 rc = request_irq(bp->msix_table[i + offset].vector,
7492 bnx2x_msix_fp_int, 0, fp->name, fp);
7493 if (rc) {
7494 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7495 bnx2x_free_msix_irqs(bp);
7496 return -EBUSY;
7497 }
7498
7499 fp->state = BNX2X_FP_STATE_IRQ;
7500 }
7501
7502 i = BNX2X_NUM_QUEUES(bp);
7503 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7504 " ... fp[%d] %d\n",
7505 bp->msix_table[0].vector,
7506 0, bp->msix_table[offset].vector,
7507 i - 1, bp->msix_table[offset + i - 1].vector);
7508
7509 return 0;
7510}
7511
7512static int bnx2x_enable_msi(struct bnx2x *bp)
7513{
7514 int rc;
7515
7516 rc = pci_enable_msi(bp->pdev);
7517 if (rc) {
7518 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7519 return -1;
7520 }
7521 bp->flags |= USING_MSI_FLAG;
7522
7523 return 0;
7524}
7525
7526static int bnx2x_req_irq(struct bnx2x *bp)
7527{
7528 unsigned long flags;
7529 int rc;
7530
7531 if (bp->flags & USING_MSI_FLAG)
7532 flags = 0;
7533 else
7534 flags = IRQF_SHARED;
7535
7536 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7537 bp->dev->name, bp->dev);
7538 if (!rc)
7539 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7540
7541 return rc;
7542}
7543
7544static void bnx2x_napi_enable(struct bnx2x *bp)
7545{
7546 int i;
7547
7548 for_each_queue(bp, i)
7549 napi_enable(&bnx2x_fp(bp, i, napi));
7550}
7551
7552static void bnx2x_napi_disable(struct bnx2x *bp)
7553{
7554 int i;
7555
7556 for_each_queue(bp, i)
7557 napi_disable(&bnx2x_fp(bp, i, napi));
7558}
7559
7560static void bnx2x_netif_start(struct bnx2x *bp)
7561{
7562 int intr_sem;
7563
7564 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7565 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7566
7567 if (intr_sem) {
7568 if (netif_running(bp->dev)) {
7569 bnx2x_napi_enable(bp);
7570 bnx2x_int_enable(bp);
7571 if (bp->state == BNX2X_STATE_OPEN)
7572 netif_tx_wake_all_queues(bp->dev);
7573 }
7574 }
7575}
7576
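bnx2x_netif_start() above only re-enables NAPI, interrupts and the TX queues when atomic_dec_and_test() brings bp->intr_sem back to zero, i.e. when the last outstanding disable has been undone. A tiny C11 sketch of that counted-gate idea (illustrative only; the kernel's atomic helpers are not the C11 API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int intr_sem = 1;    /* starts disabled, as after probe/reset */

static void gate_disable(void)
{
	atomic_fetch_add(&intr_sem, 1);
}

static void gate_enable(void)
{
	/* fetch_sub returns the old value: old == 1 means we just hit zero */
	if (atomic_fetch_sub(&intr_sem, 1) == 1)
		printf("count reached zero - actually enable interrupts\n");
	else
		printf("still disabled by another path\n");
}

int main(void)
{
	gate_disable();    /* nested disable: count = 2       */
	gate_enable();     /* count = 1, stays disabled       */
	gate_enable();     /* count = 0, enables for real     */
	return 0;
}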
7577static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7578{
7579 bnx2x_int_disable_sync(bp, disable_hw);
7580 bnx2x_napi_disable(bp);
7581 netif_tx_disable(bp->dev);
7582}
7583 6118
7584/* 6119/*
7585 * Init service functions 6120 * Init service functions
@@ -7750,7 +6285,7 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7750 return -EBUSY; 6285 return -EBUSY;
7751} 6286}
7752 6287
7753static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) 6288void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7754{ 6289{
7755 bp->set_mac_pending++; 6290 bp->set_mac_pending++;
7756 smp_wmb(); 6291 smp_wmb();
@@ -7762,7 +6297,7 @@ static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7762 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); 6297 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7763} 6298}
7764 6299
7765static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) 6300void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7766{ 6301{
7767 bp->set_mac_pending++; 6302 bp->set_mac_pending++;
7768 smp_wmb(); 6303 smp_wmb();
@@ -7786,7 +6321,7 @@ static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7786 * 6321 *
7787 * @return 0 if cussess, -ENODEV if ramrod doesn't return. 6322 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7788 */ 6323 */
7789static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) 6324int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7790{ 6325{
7791 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); 6326 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7792 6327
@@ -7813,7 +6348,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7813} 6348}
7814#endif 6349#endif
7815 6350
7816static int bnx2x_setup_leading(struct bnx2x *bp) 6351int bnx2x_setup_leading(struct bnx2x *bp)
7817{ 6352{
7818 int rc; 6353 int rc;
7819 6354
@@ -7829,7 +6364,7 @@ static int bnx2x_setup_leading(struct bnx2x *bp)
7829 return rc; 6364 return rc;
7830} 6365}
7831 6366
7832static int bnx2x_setup_multi(struct bnx2x *bp, int index) 6367int bnx2x_setup_multi(struct bnx2x *bp, int index)
7833{ 6368{
7834 struct bnx2x_fastpath *fp = &bp->fp[index]; 6369 struct bnx2x_fastpath *fp = &bp->fp[index];
7835 6370
@@ -7846,9 +6381,8 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7846 &(fp->state), 0); 6381 &(fp->state), 0);
7847} 6382}
7848 6383
7849static int bnx2x_poll(struct napi_struct *napi, int budget);
7850 6384
7851static void bnx2x_set_num_queues_msix(struct bnx2x *bp) 6385void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7852{ 6386{
7853 6387
7854 switch (bp->multi_mode) { 6388 switch (bp->multi_mode) {
@@ -7872,292 +6406,7 @@ static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7872 } 6406 }
7873} 6407}
7874 6408
7875static int bnx2x_set_num_queues(struct bnx2x *bp)
7876{
7877 int rc = 0;
7878
7879 switch (bp->int_mode) {
7880 case INT_MODE_INTx:
7881 case INT_MODE_MSI:
7882 bp->num_queues = 1;
7883 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7884 break;
7885 default:
7886 /* Set number of queues according to bp->multi_mode value */
7887 bnx2x_set_num_queues_msix(bp);
7888
7889 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7890 bp->num_queues);
7891
7892 /* if we can't use MSI-X we only need one fp,
7893 * so try to enable MSI-X with the requested number of fp's
7894 * and fallback to MSI or legacy INTx with one fp
7895 */
7896 rc = bnx2x_enable_msix(bp);
7897 if (rc)
7898 /* failed to enable MSI-X */
7899 bp->num_queues = 1;
7900 break;
7901 }
7902 bp->dev->real_num_tx_queues = bp->num_queues;
7903 return rc;
7904}
7905
7906#ifdef BCM_CNIC
7907static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7908static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7909#endif
7910
7911/* must be called with rtnl_lock */
7912static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7913{
7914 u32 load_code;
7915 int i, rc;
7916
7917#ifdef BNX2X_STOP_ON_ERROR
7918 if (unlikely(bp->panic))
7919 return -EPERM;
7920#endif
7921
7922 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7923
7924 rc = bnx2x_set_num_queues(bp);
7925
7926 if (bnx2x_alloc_mem(bp)) {
7927 bnx2x_free_irq(bp, true);
7928 return -ENOMEM;
7929 }
7930
7931 for_each_queue(bp, i)
7932 bnx2x_fp(bp, i, disable_tpa) =
7933 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7934
7935 for_each_queue(bp, i)
7936 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7937 bnx2x_poll, 128);
7938
7939 bnx2x_napi_enable(bp);
7940
7941 if (bp->flags & USING_MSIX_FLAG) {
7942 rc = bnx2x_req_msix_irqs(bp);
7943 if (rc) {
7944 bnx2x_free_irq(bp, true);
7945 goto load_error1;
7946 }
7947 } else {
7948 /* Fall to INTx if failed to enable MSI-X due to lack of
7949 memory (in bnx2x_set_num_queues()) */
7950 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7951 bnx2x_enable_msi(bp);
7952 bnx2x_ack_int(bp);
7953 rc = bnx2x_req_irq(bp);
7954 if (rc) {
7955 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7956 bnx2x_free_irq(bp, true);
7957 goto load_error1;
7958 }
7959 if (bp->flags & USING_MSI_FLAG) {
7960 bp->dev->irq = bp->pdev->irq;
7961 netdev_info(bp->dev, "using MSI IRQ %d\n",
7962 bp->pdev->irq);
7963 }
7964 }
7965
7966 /* Send LOAD_REQUEST command to MCP
7967 Returns the type of LOAD command:
7968 if it is the first port to be initialized
7969 common blocks should be initialized, otherwise - not
7970 */
7971 if (!BP_NOMCP(bp)) {
7972 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7973 if (!load_code) {
7974 BNX2X_ERR("MCP response failure, aborting\n");
7975 rc = -EBUSY;
7976 goto load_error2;
7977 }
7978 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7979 rc = -EBUSY; /* other port in diagnostic mode */
7980 goto load_error2;
7981 }
7982
7983 } else {
7984 int port = BP_PORT(bp);
7985
7986 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7987 load_count[0], load_count[1], load_count[2]);
7988 load_count[0]++;
7989 load_count[1 + port]++;
7990 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7991 load_count[0], load_count[1], load_count[2]);
7992 if (load_count[0] == 1)
7993 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7994 else if (load_count[1 + port] == 1)
7995 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7996 else
7997 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7998 }
7999
8000 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
8001 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
8002 bp->port.pmf = 1;
8003 else
8004 bp->port.pmf = 0;
8005 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
8006
8007 /* Initialize HW */
8008 rc = bnx2x_init_hw(bp, load_code);
8009 if (rc) {
8010 BNX2X_ERR("HW init failed, aborting\n");
8011 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8012 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8013 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8014 goto load_error2;
8015 }
8016
8017 /* Setup NIC internals and enable interrupts */
8018 bnx2x_nic_init(bp, load_code);
8019
8020 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8021 (bp->common.shmem2_base))
8022 SHMEM2_WR(bp, dcc_support,
8023 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8024 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8025 6409
8026 /* Send LOAD_DONE command to MCP */
8027 if (!BP_NOMCP(bp)) {
8028 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8029 if (!load_code) {
8030 BNX2X_ERR("MCP response failure, aborting\n");
8031 rc = -EBUSY;
8032 goto load_error3;
8033 }
8034 }
8035
8036 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8037
8038 rc = bnx2x_setup_leading(bp);
8039 if (rc) {
8040 BNX2X_ERR("Setup leading failed!\n");
8041#ifndef BNX2X_STOP_ON_ERROR
8042 goto load_error3;
8043#else
8044 bp->panic = 1;
8045 return -EBUSY;
8046#endif
8047 }
8048
8049 if (CHIP_IS_E1H(bp))
8050 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8051 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8052 bp->flags |= MF_FUNC_DIS;
8053 }
8054
8055 if (bp->state == BNX2X_STATE_OPEN) {
8056#ifdef BCM_CNIC
8057 /* Enable Timer scan */
8058 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8059#endif
8060 for_each_nondefault_queue(bp, i) {
8061 rc = bnx2x_setup_multi(bp, i);
8062 if (rc)
8063#ifdef BCM_CNIC
8064 goto load_error4;
8065#else
8066 goto load_error3;
8067#endif
8068 }
8069
8070 if (CHIP_IS_E1(bp))
8071 bnx2x_set_eth_mac_addr_e1(bp, 1);
8072 else
8073 bnx2x_set_eth_mac_addr_e1h(bp, 1);
8074#ifdef BCM_CNIC
8075 /* Set iSCSI L2 MAC */
8076 mutex_lock(&bp->cnic_mutex);
8077 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8078 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8079 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8080 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8081 CNIC_SB_ID(bp));
8082 }
8083 mutex_unlock(&bp->cnic_mutex);
8084#endif
8085 }
8086
8087 if (bp->port.pmf)
8088 bnx2x_initial_phy_init(bp, load_mode);
8089
8090 /* Start fast path */
8091 switch (load_mode) {
8092 case LOAD_NORMAL:
8093 if (bp->state == BNX2X_STATE_OPEN) {
8094 /* Tx queue should be only reenabled */
8095 netif_tx_wake_all_queues(bp->dev);
8096 }
8097 /* Initialize the receive filter. */
8098 bnx2x_set_rx_mode(bp->dev);
8099 break;
8100
8101 case LOAD_OPEN:
8102 netif_tx_start_all_queues(bp->dev);
8103 if (bp->state != BNX2X_STATE_OPEN)
8104 netif_tx_disable(bp->dev);
8105 /* Initialize the receive filter. */
8106 bnx2x_set_rx_mode(bp->dev);
8107 break;
8108
8109 case LOAD_DIAG:
8110 /* Initialize the receive filter. */
8111 bnx2x_set_rx_mode(bp->dev);
8112 bp->state = BNX2X_STATE_DIAG;
8113 break;
8114
8115 default:
8116 break;
8117 }
8118
8119 if (!bp->port.pmf)
8120 bnx2x__link_status_update(bp);
8121
8122 /* start the timer */
8123 mod_timer(&bp->timer, jiffies + bp->current_interval);
8124
8125#ifdef BCM_CNIC
8126 bnx2x_setup_cnic_irq_info(bp);
8127 if (bp->state == BNX2X_STATE_OPEN)
8128 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8129#endif
8130 bnx2x_inc_load_cnt(bp);
8131
8132 return 0;
8133
8134#ifdef BCM_CNIC
8135load_error4:
8136 /* Disable Timer scan */
8137 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8138#endif
8139load_error3:
8140 bnx2x_int_disable_sync(bp, 1);
8141 if (!BP_NOMCP(bp)) {
8142 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8143 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8144 }
8145 bp->port.pmf = 0;
8146 /* Free SKBs, SGEs, TPA pool and driver internals */
8147 bnx2x_free_skbs(bp);
8148 for_each_queue(bp, i)
8149 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8150load_error2:
8151 /* Release IRQs */
8152 bnx2x_free_irq(bp, false);
8153load_error1:
8154 bnx2x_napi_disable(bp);
8155 for_each_queue(bp, i)
8156 netif_napi_del(&bnx2x_fp(bp, i, napi));
8157 bnx2x_free_mem(bp);
8158
8159 return rc;
8160}
8161 6410
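bnx2x_nic_load() above uses the usual kernel bring-up shape: acquire resources in order and, on any failure, jump to a label that unwinds everything acquired so far in reverse (load_error4 down to load_error1). A stripped-down sketch of that goto-unwind idiom with placeholder steps:

#include <stdio.h>

static int step(const char *name, int ok)
{
	printf("%s: %s\n", name, ok ? "ok" : "failed");
	return ok ? 0 : -1;
}

static int demo_load(int fail_at)
{
	if (step("alloc memory", fail_at != 1))
		goto err0;
	if (step("request irqs", fail_at != 2))
		goto err1;
	if (step("init hardware", fail_at != 3))
		goto err2;

	return 0;                           /* fully up */

	/* unwind strictly in reverse order of acquisition */
err2:
	printf("  undo: free irqs\n");
err1:
	printf("  undo: free memory\n");
err0:
	return -1;
}

int main(void)
{
	demo_load(3);   /* hardware init fails: irqs and memory get released */
	return 0;
}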
8162static int bnx2x_stop_multi(struct bnx2x *bp, int index) 6411static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8163{ 6412{
@@ -8315,7 +6564,7 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8315 } 6564 }
8316} 6565}
8317 6566
8318static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) 6567void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8319{ 6568{
8320 int port = BP_PORT(bp); 6569 int port = BP_PORT(bp);
8321 u32 reset_code = 0; 6570 u32 reset_code = 0;
@@ -8463,7 +6712,7 @@ unload_error:
8463 6712
8464} 6713}
8465 6714
8466static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp) 6715void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8467{ 6716{
8468 u32 val; 6717 u32 val;
8469 6718
@@ -8485,71 +6734,6 @@ static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8485 } 6734 }
8486} 6735}
8487 6736
8488/* must be called with rtnl_lock */
8489static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8490{
8491 int i;
8492
8493 if (bp->state == BNX2X_STATE_CLOSED) {
8494 /* Interface has been removed - nothing to recover */
8495 bp->recovery_state = BNX2X_RECOVERY_DONE;
8496 bp->is_leader = 0;
8497 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8498 smp_wmb();
8499
8500 return -EINVAL;
8501 }
8502
8503#ifdef BCM_CNIC
8504 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8505#endif
8506 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8507
8508 /* Set "drop all" */
8509 bp->rx_mode = BNX2X_RX_MODE_NONE;
8510 bnx2x_set_storm_rx_mode(bp);
8511
8512 /* Disable HW interrupts, NAPI and Tx */
8513 bnx2x_netif_stop(bp, 1);
8514 netif_carrier_off(bp->dev);
8515
8516 del_timer_sync(&bp->timer);
8517 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8518 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8519 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8520
8521 /* Release IRQs */
8522 bnx2x_free_irq(bp, false);
8523
8524 /* Cleanup the chip if needed */
8525 if (unload_mode != UNLOAD_RECOVERY)
8526 bnx2x_chip_cleanup(bp, unload_mode);
8527
8528 bp->port.pmf = 0;
8529
8530 /* Free SKBs, SGEs, TPA pool and driver internals */
8531 bnx2x_free_skbs(bp);
8532 for_each_queue(bp, i)
8533 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8534 for_each_queue(bp, i)
8535 netif_napi_del(&bnx2x_fp(bp, i, napi));
8536 bnx2x_free_mem(bp);
8537
8538 bp->state = BNX2X_STATE_CLOSED;
8539
8540 /* The last driver must disable a "close the gate" if there is no
8541 * parity attention or "process kill" pending.
8542 */
8543 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8544 bnx2x_reset_is_done(bp))
8545 bnx2x_disable_close_the_gate(bp);
8546
8547 /* Reset MCP mail box sequence if there is on going recovery */
8548 if (unload_mode == UNLOAD_RECOVERY)
8549 bp->fw_seq = 0;
8550
8551 return 0;
8552}
8553 6737
8554/* Close gates #2, #3 and #4: */ 6738/* Close gates #2, #3 and #4: */
8555static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 6739static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
@@ -8862,8 +7046,6 @@ exit_leader_reset:
8862 return rc; 7046 return rc;
8863} 7047}
8864 7048
8865static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8866
8867/* Assumption: runs under rtnl lock. This together with the fact 7049/* Assumption: runs under rtnl lock. This together with the fact
8868 * that it's called only from bnx2x_reset_task() ensure that it 7050 * that it's called only from bnx2x_reset_task() ensure that it
8869 * will never be called when netif_running(bp->dev) is false. 7051 * will never be called when netif_running(bp->dev) is false.
@@ -11938,598 +10120,6 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
11938 10120
11939/* end of ethtool_ops */ 10121/* end of ethtool_ops */
11940 10122
11941/****************************************************************************
11942* General service functions
11943****************************************************************************/
11944
11945static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11946{
11947 u16 pmcsr;
11948
11949 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11950
11951 switch (state) {
11952 case PCI_D0:
11953 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11954 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11955 PCI_PM_CTRL_PME_STATUS));
11956
11957 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11958 /* delay required during transition out of D3hot */
11959 msleep(20);
11960 break;
11961
11962 case PCI_D3hot:
11963 /* If there are other clients above don't
11964 shut down the power */
11965 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11966 return 0;
11967 /* Don't shut down the power for emulation and FPGA */
11968 if (CHIP_REV_IS_SLOW(bp))
11969 return 0;
11970
11971 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11972 pmcsr |= 3;
11973
11974 if (bp->wol)
11975 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11976
11977 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11978 pmcsr);
11979
11980 /* No more memory access after this point until
11981 * device is brought back to D0.
11982 */
11983 break;
11984
11985 default:
11986 return -EINVAL;
11987 }
11988 return 0;
11989}
11990
11991static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11992{
11993 u16 rx_cons_sb;
11994
11995 /* Tell compiler that status block fields can change */
11996 barrier();
11997 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11998 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11999 rx_cons_sb++;
12000 return (fp->rx_comp_cons != rx_cons_sb);
12001}
12002
12003/*
12004 * net_device service functions
12005 */
12006
12007static int bnx2x_poll(struct napi_struct *napi, int budget)
12008{
12009 int work_done = 0;
12010 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
12011 napi);
12012 struct bnx2x *bp = fp->bp;
12013
12014 while (1) {
12015#ifdef BNX2X_STOP_ON_ERROR
12016 if (unlikely(bp->panic)) {
12017 napi_complete(napi);
12018 return 0;
12019 }
12020#endif
12021
12022 if (bnx2x_has_tx_work(fp))
12023 bnx2x_tx_int(fp);
12024
12025 if (bnx2x_has_rx_work(fp)) {
12026 work_done += bnx2x_rx_int(fp, budget - work_done);
12027
12028 /* must not complete if we consumed full budget */
12029 if (work_done >= budget)
12030 break;
12031 }
12032
12033 /* Fall out from the NAPI loop if needed */
12034 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12035 bnx2x_update_fpsb_idx(fp);
12036 /* bnx2x_has_rx_work() reads the status block, thus we need
12037 * to ensure that status block indices have been actually read
12038 * (bnx2x_update_fpsb_idx) prior to this check
12039 * (bnx2x_has_rx_work) so that we won't write the "newer"
12040 * value of the status block to IGU (if there was a DMA right
12041 * after bnx2x_has_rx_work and if there is no rmb, the memory
12042 * reading (bnx2x_update_fpsb_idx) may be postponed to right
12043 * before bnx2x_ack_sb). In this case there will never be
12044 * another interrupt until there is another update of the
12045 * status block, while there is still unhandled work.
12046 */
12047 rmb();
12048
12049 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12050 napi_complete(napi);
12051 /* Re-enable interrupts */
12052 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12053 le16_to_cpu(fp->fp_c_idx),
12054 IGU_INT_NOP, 1);
12055 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12056 le16_to_cpu(fp->fp_u_idx),
12057 IGU_INT_ENABLE, 1);
12058 break;
12059 }
12060 }
12061 }
12062
12063 return work_done;
12064}
12065
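The comment inside bnx2x_poll() above explains why the indices are re-read (bnx2x_update_fpsb_idx() plus rmb()) and the work check repeated before napi_complete() and the IGU ack: a completion landing in that window would otherwise be left unserviced with no further interrupt. The control flow of that check / refresh / re-check pattern, reduced to a single-threaded sketch (the real race needs DMA and the IGU, so only the ordering is shown):

#include <stdbool.h>
#include <stdio.h>

static int pending;                 /* stand-in for "work visible in the ring" */

static bool has_work(void)        { return pending > 0; }
static void handle_work(void)     { pending--; }
static void refresh_indices(void) { /* driver: re-read status block + rmb() */ }
static void reenable_irq(void)    { printf("irq re-enabled\n"); }

static void demo_poll(void)
{
	for (;;) {
		while (has_work())
			handle_work();

		/* Re-read the indices *before* deciding to stop; only if the
		 * ring is still empty after the refresh is it safe to
		 * complete and re-enable the interrupt. */
		refresh_indices();
		if (!has_work()) {
			reenable_irq();
			break;
		}
		/* new work appeared in the window - go around again */
	}
}

int main(void)
{
	pending = 3;
	demo_poll();
	return 0;
}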
12066
12067/* we split the first BD into headers and data BDs
12068 * to ease the pain of our fellow microcode engineers
12069 * we use one mapping for both BDs
12070 * So far this has only been observed to happen
12071 * in Other Operating Systems(TM)
12072 */
12073static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12074 struct bnx2x_fastpath *fp,
12075 struct sw_tx_bd *tx_buf,
12076 struct eth_tx_start_bd **tx_bd, u16 hlen,
12077 u16 bd_prod, int nbd)
12078{
12079 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12080 struct eth_tx_bd *d_tx_bd;
12081 dma_addr_t mapping;
12082 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12083
12084 /* first fix first BD */
12085 h_tx_bd->nbd = cpu_to_le16(nbd);
12086 h_tx_bd->nbytes = cpu_to_le16(hlen);
12087
12088 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12089 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12090 h_tx_bd->addr_lo, h_tx_bd->nbd);
12091
12092 /* now get a new data BD
12093 * (after the pbd) and fill it */
12094 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12095 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12096
12097 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12098 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12099
12100 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12101 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12102 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12103
12104 /* this marks the BD as one that has no individual mapping */
12105 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12106
12107 DP(NETIF_MSG_TX_QUEUED,
12108 "TSO split data size is %d (%x:%x)\n",
12109 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12110
12111 /* update tx_bd */
12112 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12113
12114 return bd_prod;
12115}
12116
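bnx2x_tx_split() above turns the first buffer into a header BD of hlen bytes plus a data BD for the remainder, both referencing the same DMA mapping, the second simply offset by hlen. A sketch of that split with an invented descriptor type:

#include <stdio.h>
#include <stdint.h>

struct demo_bd {
	uint64_t addr;      /* the driver stores this as le32 hi/lo halves */
	uint16_t nbytes;
};

/* Split one mapped buffer into a header BD and a data BD that share the
 * same DMA mapping - the data BD simply starts 'hlen' bytes further in. */
static void split_first_bd(struct demo_bd *hdr, struct demo_bd *data,
			   uint64_t mapping, uint16_t len, uint16_t hlen)
{
	hdr->addr = mapping;
	hdr->nbytes = hlen;

	data->addr = mapping + hlen;
	data->nbytes = (uint16_t)(len - hlen);
}

int main(void)
{
	struct demo_bd hdr, data;

	split_first_bd(&hdr, &data, 0x1000, 1514, 54);  /* 54 = eth+ip+tcp hdrs */
	printf("hdr  @%#llx len %u\n", (unsigned long long)hdr.addr, hdr.nbytes);
	printf("data @%#llx len %u\n", (unsigned long long)data.addr, data.nbytes);
	return 0;
}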
12117static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12118{
12119 if (fix > 0)
12120 csum = (u16) ~csum_fold(csum_sub(csum,
12121 csum_partial(t_header - fix, fix, 0)));
12122
12123 else if (fix < 0)
12124 csum = (u16) ~csum_fold(csum_add(csum,
12125 csum_partial(t_header, -fix, 0)));
12126
12127 return swab16(csum);
12128}
12129
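bnx2x_csum_fix() above repairs a checksum the hardware computed from the wrong starting offset by folding the partial one's-complement sum of the skipped bytes out of (or into) the result and byte-swapping it. The sketch below shows the property this relies on: the Internet checksum of a whole buffer equals the one's-complement addition of the checksums of a prefix and the remaining suffix. The sample words and helper names are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* one's-complement ("Internet") sum over 16-bit words, folded to 16 bits */
static uint16_t csum16(const uint16_t *w, int nwords)
{
	uint32_t sum = 0;

	while (nwords--)
		sum += *w++;
	while (sum >> 16)                    /* end-around carry */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* one's-complement addition of two folded sums */
static uint16_t csum_add16(uint16_t a, uint16_t b)
{
	uint32_t sum = (uint32_t)a + b;

	return (uint16_t)((sum & 0xffff) + (sum >> 16));
}

int main(void)
{
	uint16_t words[6] = { 0x4500, 0x003c, 0x1c46, 0x4000, 0x4006, 0xb1e6 };

	/* The sum over the whole range equals the one's-complement addition
	 * of the sums over a prefix and the remaining suffix, which is why a
	 * checksum started at the wrong offset can be fixed up by adding or
	 * subtracting the partial sum over the skipped bytes. */
	uint16_t whole  = csum16(words, 6);
	uint16_t prefix = csum16(words, 2);
	uint16_t suffix = csum16(words + 2, 4);

	printf("whole        = %#06x\n", (unsigned)whole);
	printf("prefix + suf = %#06x\n", (unsigned)csum_add16(prefix, suffix));
	return 0;
}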
12130static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12131{
12132 u32 rc;
12133
12134 if (skb->ip_summed != CHECKSUM_PARTIAL)
12135 rc = XMIT_PLAIN;
12136
12137 else {
12138 if (skb->protocol == htons(ETH_P_IPV6)) {
12139 rc = XMIT_CSUM_V6;
12140 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12141 rc |= XMIT_CSUM_TCP;
12142
12143 } else {
12144 rc = XMIT_CSUM_V4;
12145 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12146 rc |= XMIT_CSUM_TCP;
12147 }
12148 }
12149
12150 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12151 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12152
12153 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12154 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12155
12156 return rc;
12157}
12158
12159#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12160/* check if packet requires linearization (packet is too fragmented)
12161 no need to check fragmentation if page size > 8K (there will be no
12162 violation to FW restrictions) */
12163static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12164 u32 xmit_type)
12165{
12166 int to_copy = 0;
12167 int hlen = 0;
12168 int first_bd_sz = 0;
12169
12170 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12171 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12172
12173 if (xmit_type & XMIT_GSO) {
12174 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12175 /* Check if LSO packet needs to be copied:
12176 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12177 int wnd_size = MAX_FETCH_BD - 3;
12178 /* Number of windows to check */
12179 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12180 int wnd_idx = 0;
12181 int frag_idx = 0;
12182 u32 wnd_sum = 0;
12183
12184 /* Headers length */
12185 hlen = (int)(skb_transport_header(skb) - skb->data) +
12186 tcp_hdrlen(skb);
12187
12188 /* Amount of data (w/o headers) on linear part of SKB*/
12189 first_bd_sz = skb_headlen(skb) - hlen;
12190
12191 wnd_sum = first_bd_sz;
12192
12193 /* Calculate the first sum - it's special */
12194 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12195 wnd_sum +=
12196 skb_shinfo(skb)->frags[frag_idx].size;
12197
12198 /* If there was data on linear skb data - check it */
12199 if (first_bd_sz > 0) {
12200 if (unlikely(wnd_sum < lso_mss)) {
12201 to_copy = 1;
12202 goto exit_lbl;
12203 }
12204
12205 wnd_sum -= first_bd_sz;
12206 }
12207
12208 /* Others are easier: run through the frag list and
12209 check all windows */
12210 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12211 wnd_sum +=
12212 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12213
12214 if (unlikely(wnd_sum < lso_mss)) {
12215 to_copy = 1;
12216 break;
12217 }
12218 wnd_sum -=
12219 skb_shinfo(skb)->frags[wnd_idx].size;
12220 }
12221 } else {
12222			/* a non-LSO packet that is too fragmented must always
12223			   be linearized */
12224 to_copy = 1;
12225 }
12226 }
12227
12228exit_lbl:
12229 if (unlikely(to_copy))
12230 DP(NETIF_MSG_TX_QUEUED,
12231 "Linearization IS REQUIRED for %s packet. "
12232 "num_frags %d hlen %d first_bd_sz %d\n",
12233 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12234 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12235
12236 return to_copy;
12237}
12238#endif
12239
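bnx2x_pkt_req_lin() just above decides whether an LSO skb is too fragmented for the firmware: every window of MAX_FETCH_BD - 3 consecutive buffers must carry at least one MSS of payload, otherwise the skb is linearized first. A compact sketch of that sliding-window test over an array of fragment sizes; the driver keeps a rolling sum where this brute-forces each window, and the sizes below are invented.

#include <stdbool.h>
#include <stdio.h>

/* Return true if some window of 'wnd' consecutive buffers carries less
 * than 'mss' bytes of payload, i.e. the packet must be linearized. */
static bool needs_linearize(const int *frag, int nfrags, int wnd, int mss)
{
	for (int start = 0; start + wnd <= nfrags; start++) {
		int sum = 0;

		for (int i = start; i < start + wnd; i++)
			sum += frag[i];
		if (sum < mss)
			return true;
	}
	return false;
}

int main(void)
{
	/* first entry plays the role of the linear part, the rest are frags */
	int sizes[] = { 1400, 100, 60, 60, 1448, 1448 };
	int n = sizeof(sizes) / sizeof(sizes[0]);

	printf("window=3, mss=1448 -> %s\n",
	       needs_linearize(sizes, n, 3, 1448) ? "linearize" : "ok as is");
	return 0;
}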
12240/* called with netif_tx_lock
12241 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12242 * netif_wake_queue()
12243 */
12244static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12245{
12246 struct bnx2x *bp = netdev_priv(dev);
12247 struct bnx2x_fastpath *fp;
12248 struct netdev_queue *txq;
12249 struct sw_tx_bd *tx_buf;
12250 struct eth_tx_start_bd *tx_start_bd;
12251 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12252 struct eth_tx_parse_bd *pbd = NULL;
12253 u16 pkt_prod, bd_prod;
12254 int nbd, fp_index;
12255 dma_addr_t mapping;
12256 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12257 int i;
12258 u8 hlen = 0;
12259 __le16 pkt_size = 0;
12260 struct ethhdr *eth;
12261 u8 mac_type = UNICAST_ADDRESS;
12262
12263#ifdef BNX2X_STOP_ON_ERROR
12264 if (unlikely(bp->panic))
12265 return NETDEV_TX_BUSY;
12266#endif
12267
12268 fp_index = skb_get_queue_mapping(skb);
12269 txq = netdev_get_tx_queue(dev, fp_index);
12270
12271 fp = &bp->fp[fp_index];
12272
12273 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12274 fp->eth_q_stats.driver_xoff++;
12275 netif_tx_stop_queue(txq);
12276 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12277 return NETDEV_TX_BUSY;
12278 }
12279
12280 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12281 " gso type %x xmit_type %x\n",
12282 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12283 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12284
12285 eth = (struct ethhdr *)skb->data;
12286
12287 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12288 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12289 if (is_broadcast_ether_addr(eth->h_dest))
12290 mac_type = BROADCAST_ADDRESS;
12291 else
12292 mac_type = MULTICAST_ADDRESS;
12293 }
12294
12295#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12296 /* First, check if we need to linearize the skb (due to FW
12297 restrictions). No need to check fragmentation if page size > 8K
12298 (there will be no violation to FW restrictions) */
12299 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12300 /* Statistics of linearization */
12301 bp->lin_cnt++;
12302 if (skb_linearize(skb) != 0) {
12303 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12304 "silently dropping this SKB\n");
12305 dev_kfree_skb_any(skb);
12306 return NETDEV_TX_OK;
12307 }
12308 }
12309#endif
12310
12311 /*
12312 Please read carefully. First we use one BD which we mark as start,
12313 then we have a parsing info BD (used for TSO or xsum),
12314 and only then we have the rest of the TSO BDs.
12315 (don't forget to mark the last one as last,
12316 and to unmap only AFTER you write to the BD ...)
12317	   And above all, all pbd sizes are in words - NOT DWORDS!
12318 */
12319
12320 pkt_prod = fp->tx_pkt_prod++;
12321 bd_prod = TX_BD(fp->tx_bd_prod);
12322
12323 /* get a tx_buf and first BD */
12324 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12325 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12326
12327 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12328 tx_start_bd->general_data = (mac_type <<
12329 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12330 /* header nbd */
12331 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12332
12333 /* remember the first BD of the packet */
12334 tx_buf->first_bd = fp->tx_bd_prod;
12335 tx_buf->skb = skb;
12336 tx_buf->flags = 0;
12337
12338 DP(NETIF_MSG_TX_QUEUED,
12339 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12340 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12341
12342#ifdef BCM_VLAN
12343 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12344 (bp->flags & HW_VLAN_TX_FLAG)) {
12345 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12346 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12347 } else
12348#endif
12349 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12350
12351 /* turn on parsing and get a BD */
12352 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12353 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12354
12355 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12356
12357 if (xmit_type & XMIT_CSUM) {
12358 hlen = (skb_network_header(skb) - skb->data) / 2;
12359
12360 /* for now NS flag is not used in Linux */
12361 pbd->global_data =
12362 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12363 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12364
12365 pbd->ip_hlen = (skb_transport_header(skb) -
12366 skb_network_header(skb)) / 2;
12367
12368 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12369
12370 pbd->total_hlen = cpu_to_le16(hlen);
12371 hlen = hlen*2;
12372
12373 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12374
12375 if (xmit_type & XMIT_CSUM_V4)
12376 tx_start_bd->bd_flags.as_bitfield |=
12377 ETH_TX_BD_FLAGS_IP_CSUM;
12378 else
12379 tx_start_bd->bd_flags.as_bitfield |=
12380 ETH_TX_BD_FLAGS_IPV6;
12381
12382 if (xmit_type & XMIT_CSUM_TCP) {
12383 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12384
12385 } else {
12386 s8 fix = SKB_CS_OFF(skb); /* signed! */
12387
12388 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12389
12390 DP(NETIF_MSG_TX_QUEUED,
12391 "hlen %d fix %d csum before fix %x\n",
12392 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12393
12394 /* HW bug: fixup the CSUM */
12395 pbd->tcp_pseudo_csum =
12396 bnx2x_csum_fix(skb_transport_header(skb),
12397 SKB_CS(skb), fix);
12398
12399 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12400 pbd->tcp_pseudo_csum);
12401 }
12402 }
12403
12404 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12405 skb_headlen(skb), DMA_TO_DEVICE);
12406
12407 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12408 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12409 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12410 tx_start_bd->nbd = cpu_to_le16(nbd);
12411 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12412 pkt_size = tx_start_bd->nbytes;
12413
12414 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12415 " nbytes %d flags %x vlan %x\n",
12416 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12417 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12418 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12419
12420 if (xmit_type & XMIT_GSO) {
12421
12422 DP(NETIF_MSG_TX_QUEUED,
12423 "TSO packet len %d hlen %d total len %d tso size %d\n",
12424 skb->len, hlen, skb_headlen(skb),
12425 skb_shinfo(skb)->gso_size);
12426
12427 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12428
12429 if (unlikely(skb_headlen(skb) > hlen))
12430 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12431 hlen, bd_prod, ++nbd);
12432
12433 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12434 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12435 pbd->tcp_flags = pbd_tcp_flags(skb);
12436
12437 if (xmit_type & XMIT_GSO_V4) {
12438 pbd->ip_id = swab16(ip_hdr(skb)->id);
12439 pbd->tcp_pseudo_csum =
12440 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12441 ip_hdr(skb)->daddr,
12442 0, IPPROTO_TCP, 0));
12443
12444 } else
12445 pbd->tcp_pseudo_csum =
12446 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12447 &ipv6_hdr(skb)->daddr,
12448 0, IPPROTO_TCP, 0));
12449
12450 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12451 }
12452 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12453
12454 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12455 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12456
12457 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12458 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12459 if (total_pkt_bd == NULL)
12460 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12461
12462 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12463 frag->page_offset,
12464 frag->size, DMA_TO_DEVICE);
12465
12466 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12467 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12468 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12469 le16_add_cpu(&pkt_size, frag->size);
12470
12471 DP(NETIF_MSG_TX_QUEUED,
12472 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12473 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12474 le16_to_cpu(tx_data_bd->nbytes));
12475 }
12476
12477 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12478
12479 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12480
12481 /* now send a tx doorbell, counting the next BD
12482 * if the packet contains or ends with it
12483 */
12484 if (TX_BD_POFF(bd_prod) < nbd)
12485 nbd++;
12486
12487 if (total_pkt_bd != NULL)
12488 total_pkt_bd->total_pkt_bytes = pkt_size;
12489
12490 if (pbd)
12491 DP(NETIF_MSG_TX_QUEUED,
12492 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12493 " tcp_flags %x xsum %x seq %u hlen %u\n",
12494 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12495 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12496 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12497
12498 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
12499
12500 /*
12501 * Make sure that the BD data is updated before updating the producer
12502 * since FW might read the BD right after the producer is updated.
12503 * This is only applicable for weak-ordered memory model archs such
12504 * as IA-64. The following barrier is also mandatory since FW will
12505	 * assume packets must have BDs.
12506 */
12507 wmb();
12508
12509 fp->tx_db.data.prod += nbd;
12510 barrier();
12511 DOORBELL(bp, fp->index, fp->tx_db.raw);
12512
12513 mmiowb();
12514
12515 fp->tx_bd_prod += nbd;
12516
12517 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12518 netif_tx_stop_queue(txq);
12519
12520 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12521 * ordering of set_bit() in netif_tx_stop_queue() and read of
12522	 * fp->tx_bd_cons */
12523 smp_mb();
12524
12525 fp->eth_q_stats.driver_xoff++;
12526 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12527 netif_tx_wake_queue(txq);
12528 }
12529 fp->tx_pkt++;
12530
12531 return NETDEV_TX_OK;
12532}
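
The transmit path above ends with a carefully ordered sequence: descriptor (BD) writes, wmb(), producer update, doorbell write, mmiowb(). As a rough illustration of that ordering in isolation, here is a minimal, hypothetical sketch; struct fake_txq, fake_tx_kick and its fields are invented names for illustration only, not part of the bnx2x driver:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Hypothetical TX ring state -- illustrative only.  The point is the
 * ordering: descriptor writes, wmb(), producer update, doorbell write.
 */
struct fake_txq {
	u16 prod;		/* driver-side producer index */
	void __iomem *doorbell;	/* device doorbell register */
};

static void fake_tx_kick(struct fake_txq *q, u16 nbd)
{
	/*
	 * All descriptor writes must be visible in memory before the
	 * device can observe the new producer value; otherwise it may
	 * fetch stale descriptors on weakly ordered architectures.
	 */
	wmb();

	q->prod += nbd;

	/* Publish the new producer value to the device. */
	writel(q->prod, q->doorbell);

	/*
	 * On 2010-era kernels an mmiowb() followed the doorbell write to
	 * order the MMIO store against a subsequently released spinlock.
	 */
	mmiowb();
}

The barrier sits before the producer update because, as the comment in the driver notes, the firmware may fetch descriptors as soon as it sees the producer advance.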
12533 10123
12534/* called with rtnl_lock */ 10124/* called with rtnl_lock */
12535static int bnx2x_open(struct net_device *dev) 10125static int bnx2x_open(struct net_device *dev)
@@ -12590,7 +10180,7 @@ static int bnx2x_close(struct net_device *dev)
12590} 10180}
12591 10181
12592/* called with netif_tx_lock from dev_mcast.c */ 10182/* called with netif_tx_lock from dev_mcast.c */
12593static void bnx2x_set_rx_mode(struct net_device *dev) 10183void bnx2x_set_rx_mode(struct net_device *dev)
12594{ 10184{
12595 struct bnx2x *bp = netdev_priv(dev); 10185 struct bnx2x *bp = netdev_priv(dev);
12596 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 10186 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
@@ -12710,25 +10300,6 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
12710 bnx2x_set_storm_rx_mode(bp); 10300 bnx2x_set_storm_rx_mode(bp);
12711} 10301}
12712 10302
12713/* called with rtnl_lock */
12714static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12715{
12716 struct sockaddr *addr = p;
12717 struct bnx2x *bp = netdev_priv(dev);
12718
12719 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12720 return -EINVAL;
12721
12722 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12723 if (netif_running(dev)) {
12724 if (CHIP_IS_E1(bp))
12725 bnx2x_set_eth_mac_addr_e1(bp, 1);
12726 else
12727 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12728 }
12729
12730 return 0;
12731}
12732 10303
12733/* called with rtnl_lock */ 10304/* called with rtnl_lock */
12734static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 10305static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
@@ -12804,71 +10375,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12804 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 10375 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12805} 10376}
12806 10377
12807/* called with rtnl_lock */
12808static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12809{
12810 struct bnx2x *bp = netdev_priv(dev);
12811 int rc = 0;
12812
12813 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12814 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12815 return -EAGAIN;
12816 }
12817
12818 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12819 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12820 return -EINVAL;
12821
12822 /* This does not race with packet allocation
12823 * because the actual alloc size is
12824 * only updated as part of load
12825 */
12826 dev->mtu = new_mtu;
12827
12828 if (netif_running(dev)) {
12829 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12830 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12831 }
12832
12833 return rc;
12834}
12835
12836static void bnx2x_tx_timeout(struct net_device *dev)
12837{
12838 struct bnx2x *bp = netdev_priv(dev);
12839
12840#ifdef BNX2X_STOP_ON_ERROR
12841 if (!bp->panic)
12842 bnx2x_panic();
12843#endif
12844	/* This allows the netif to be shut down gracefully before resetting */
12845 schedule_delayed_work(&bp->reset_task, 0);
12846}
12847
12848#ifdef BCM_VLAN
12849/* called with rtnl_lock */
12850static void bnx2x_vlan_rx_register(struct net_device *dev,
12851 struct vlan_group *vlgrp)
12852{
12853 struct bnx2x *bp = netdev_priv(dev);
12854
12855 bp->vlgrp = vlgrp;
12856
12857 /* Set flags according to the required capabilities */
12858 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12859
12860 if (dev->features & NETIF_F_HW_VLAN_TX)
12861 bp->flags |= HW_VLAN_TX_FLAG;
12862
12863 if (dev->features & NETIF_F_HW_VLAN_RX)
12864 bp->flags |= HW_VLAN_RX_FLAG;
12865
12866 if (netif_running(dev))
12867 bnx2x_set_client_config(bp);
12868}
12869
12870#endif
12871
12872#ifdef CONFIG_NET_POLL_CONTROLLER 10378#ifdef CONFIG_NET_POLL_CONTROLLER
12873static void poll_bnx2x(struct net_device *dev) 10379static void poll_bnx2x(struct net_device *dev)
12874{ 10380{
@@ -13370,73 +10876,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13370 pci_set_drvdata(pdev, NULL); 10876 pci_set_drvdata(pdev, NULL);
13371} 10877}
13372 10878
13373static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13374{
13375 struct net_device *dev = pci_get_drvdata(pdev);
13376 struct bnx2x *bp;
13377
13378 if (!dev) {
13379 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13380 return -ENODEV;
13381 }
13382 bp = netdev_priv(dev);
13383
13384 rtnl_lock();
13385
13386 pci_save_state(pdev);
13387
13388 if (!netif_running(dev)) {
13389 rtnl_unlock();
13390 return 0;
13391 }
13392
13393 netif_device_detach(dev);
13394
13395 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13396
13397 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13398
13399 rtnl_unlock();
13400
13401 return 0;
13402}
13403
13404static int bnx2x_resume(struct pci_dev *pdev)
13405{
13406 struct net_device *dev = pci_get_drvdata(pdev);
13407 struct bnx2x *bp;
13408 int rc;
13409
13410 if (!dev) {
13411 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13412 return -ENODEV;
13413 }
13414 bp = netdev_priv(dev);
13415
13416 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13417 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13418 return -EAGAIN;
13419 }
13420
13421 rtnl_lock();
13422
13423 pci_restore_state(pdev);
13424
13425 if (!netif_running(dev)) {
13426 rtnl_unlock();
13427 return 0;
13428 }
13429
13430 bnx2x_set_power_state(bp, PCI_D0);
13431 netif_device_attach(dev);
13432
13433 rc = bnx2x_nic_load(bp, LOAD_OPEN);
13434
13435 rtnl_unlock();
13436
13437 return rc;
13438}
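
The suspend/resume pair removed here follows the legacy PCI power-management callback shape: save/restore config space, detach/attach the netdev, and unload/load the NIC around the power transition, all under rtnl_lock. A minimal, hypothetical sketch of that shape for a generic netdev driver — fake_suspend, fake_resume, fake_load and fake_unload are placeholder names, not bnx2x functions:

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Driver-specific load/unload hooks are assumed to exist. */
static int fake_unload(struct net_device *dev);
static int fake_load(struct net_device *dev);

static int fake_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	rtnl_lock();
	pci_save_state(pdev);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		fake_unload(dev);
	}
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	rtnl_unlock();
	return 0;
}

static int fake_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = 0;

	rtnl_lock();
	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);
	if (netif_running(dev)) {
		netif_device_attach(dev);
		rc = fake_load(dev);
	}
	rtnl_unlock();
	return rc;
}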
13439
13440static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 10879static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13441{ 10880{
13442 int i; 10881 int i;
@@ -13758,7 +11197,7 @@ static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13758/* 11197/*
13759 * for commands that have no data 11198 * for commands that have no data
13760 */ 11199 */
13761static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) 11200int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13762{ 11201{
13763 struct cnic_ctl_info ctl = {0}; 11202 struct cnic_ctl_info ctl = {0};
13764 11203
@@ -13826,7 +11265,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13826 return rc; 11265 return rc;
13827} 11266}
13828 11267
13829static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) 11268void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13830{ 11269{
13831 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 11270 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13832 11271