Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--  drivers/net/bnx2.c  129
1 file changed, 74 insertions(+), 55 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 381887ba677c..667f4196dc29 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,8 +58,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.0.8"
-#define DRV_MODULE_RELDATE "Feb 15, 2010"
+#define DRV_MODULE_VERSION "2.0.9"
+#define DRV_MODULE_RELDATE "April 27, 2010"
 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@ -246,6 +246,8 @@ static const struct flash_spec flash_5709 = {
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
+static void bnx2_init_napi(struct bnx2 *bp);
+
 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
 	u32 diff;
@@ -649,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp)
 }
 
 static void
-bnx2_netif_stop(struct bnx2 *bp)
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
 {
-	bnx2_cnic_stop(bp);
+	if (stop_cnic)
+		bnx2_cnic_stop(bp);
 	if (netif_running(bp->dev)) {
 		int i;
 
@@ -669,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp)
 }
 
 static void
-bnx2_netif_start(struct bnx2 *bp)
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
 {
 	if (atomic_dec_and_test(&bp->intr_sem)) {
 		if (netif_running(bp->dev)) {
 			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
-			bnx2_cnic_start(bp);
+			if (start_cnic)
+				bnx2_cnic_start(bp);
 		}
 	}
 }
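
The new stop_cnic/start_cnic flag lets a caller quiesce just the net device without bouncing the cnic (iSCSI offload) side. A sketch of the resulting call-site split, taken from the hunks further below:

    /* full reset paths restart the offload device too */
    bnx2_netif_stop(bp, true);
    bnx2_init_nic(bp, 1);
    bnx2_netif_start(bp, true);

    /* the VLAN-group update only needs the netdev quiesced */
    bnx2_netif_stop(bp, false);
    bp->vlgrp = vlgrp;
    bnx2_netif_start(bp, false);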
@@ -2668,7 +2672,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 	}
 
 	rx_pg->page = page;
-	pci_unmap_addr_set(rx_pg, mapping, mapping);
+	dma_unmap_addr_set(rx_pg, mapping, mapping);
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
 	return 0;
@@ -2683,7 +2687,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 	if (!page)
 		return;
 
-	pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
+	pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
 		       PCI_DMA_FROMDEVICE);
 
 	__free_page(page);
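
These hunks are part of the tree-wide move from the PCI-specific pci_unmap_addr*() helpers to the generic dma_unmap_addr*() ones in <linux/dma-mapping.h>. A minimal sketch of what the new macros do, assuming the ring-entry layout from bnx2.h:

    struct sw_pg {
        struct page *page;
        DEFINE_DMA_UNMAP_ADDR(mapping);  /* replaces DECLARE_PCI_UNMAP_ADDR() */
    };

    /* store the bus address at map time, read it back at unmap time;
     * both compile away on platforms that don't need the saved address */
    dma_unmap_addr_set(rx_pg, mapping, mapping);
    dma_addr_t addr = dma_unmap_addr(rx_pg, mapping);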
@@ -2715,7 +2719,8 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 	}
 
 	rx_buf->skb = skb;
-	pci_unmap_addr_set(rx_buf, mapping, mapping);
+	rx_buf->desc = (struct l2_fhdr *) skb->data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
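
Caching the frame-header pointer in the ring entry means the hot rx loop no longer has to re-derive it from skb->data, and (in a later hunk) can prefetch the next entry's header. Assumed shape of the ring entry after this change (the real definition lives in bnx2.h):

    struct sw_bd {
        struct sk_buff *skb;
        struct l2_fhdr *desc;          /* == (struct l2_fhdr *) skb->data */
        DEFINE_DMA_UNMAP_ADDR(mapping);
    };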
@@ -2814,7 +2819,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		}
 	}
 
-	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+	pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
 			 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 	tx_buf->skb = NULL;
@@ -2824,7 +2829,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			sw_cons = NEXT_TX_BD(sw_cons);
 
 			pci_unmap_page(bp->pdev,
-				pci_unmap_addr(
+				dma_unmap_addr(
 					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
 					mapping),
 				skb_shinfo(skb)->frags[i].size,
@@ -2906,8 +2911,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 	if (prod != cons) {
 		prod_rx_pg->page = cons_rx_pg->page;
 		cons_rx_pg->page = NULL;
-		pci_unmap_addr_set(prod_rx_pg, mapping,
-			pci_unmap_addr(cons_rx_pg, mapping));
+		dma_unmap_addr_set(prod_rx_pg, mapping,
+			dma_unmap_addr(cons_rx_pg, mapping));
 
 		prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
 		prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2931,18 +2936,19 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 	prod_rx_buf = &rxr->rx_buf_ring[prod];
 
 	pci_dma_sync_single_for_device(bp->pdev,
-		pci_unmap_addr(cons_rx_buf, mapping),
+		dma_unmap_addr(cons_rx_buf, mapping),
 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
 	prod_rx_buf->skb = skb;
+	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
 
 	if (cons == prod)
 		return;
 
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-			pci_unmap_addr(cons_rx_buf, mapping));
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			dma_unmap_addr(cons_rx_buf, mapping));
 
 	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3015,7 +3021,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 			/* Don't unmap yet.  If we're unable to allocate a new
 			 * page, we need to recycle the page and the DMA addr.
 			 */
-			mapping_old = pci_unmap_addr(rx_pg, mapping);
+			mapping_old = dma_unmap_addr(rx_pg, mapping);
 			if (i == pages - 1)
 				frag_len -= 4;
 
@@ -3070,6 +3076,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
 	struct l2_fhdr *rx_hdr;
 	int rx_pkt = 0, pg_ring_used = 0;
+	struct pci_dev *pdev = bp->pdev;
 
 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
 	sw_cons = rxr->rx_cons;
@@ -3082,7 +3089,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	while (sw_cons != hw_cons) {
 		unsigned int len, hdr_len;
 		u32 status;
-		struct sw_bd *rx_buf;
+		struct sw_bd *rx_buf, *next_rx_buf;
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
 		u16 vtag = 0;
@@ -3093,16 +3100,23 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
 		skb = rx_buf->skb;
+		prefetchw(skb);
 
+		if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
+			next_rx_buf =
+				&rxr->rx_buf_ring[
+					RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+			prefetch(next_rx_buf->desc);
+		}
 		rx_buf->skb = NULL;
 
-		dma_addr = pci_unmap_addr(rx_buf, mapping);
+		dma_addr = dma_unmap_addr(rx_buf, mapping);
 
 		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
 			PCI_DMA_FROMDEVICE);
 
-		rx_hdr = (struct l2_fhdr *) skb->data;
+		rx_hdr = rx_buf->desc;
 		len = rx_hdr->l2_fhdr_pkt_len;
 		status = rx_hdr->l2_fhdr_status;
 
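
The get_dma_ops() test gates the new prefetch: if the architecture supplies no sync_single_for_cpu hook, the buffer contents are visible to the CPU as soon as the NIC writes them, so the next entry's cached l2_fhdr can be prefetched a packet early; with a sync hook (e.g. swiotlb bounce buffering) the header isn't valid until the sync, so prefetching it would be useless. Condensed, the hint amounts to:

    if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu)   /* coherent DMA */
        prefetch(rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))].desc);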
@@ -3203,10 +3217,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 #ifdef BCM_VLAN
 		if (hw_vlan)
-			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
+			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
 		else
 #endif
-			netif_receive_skb(skb);
+			napi_gro_receive(&bnapi->napi, skb);
 
 		rx_pkt++;
 
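
Received skbs now go through the GRO engine, which can coalesce consecutive TCP segments before the stack sees them; the matching NETIF_F_GRO flag is advertised in the probe hunk near the end of this patch. A generic sketch of the NAPI+GRO receive pattern being adopted (pull_next_rx_skb() is a hypothetical stand-in for the driver's real dequeue logic):

    static int example_poll(struct napi_struct *napi, int budget)
    {
        int work = 0;

        while (work < budget) {
            struct sk_buff *skb = pull_next_rx_skb();  /* hypothetical */
            if (!skb)
                break;
            napi_gro_receive(napi, skb);  /* was netif_receive_skb(skb) */
            work++;
        }
        if (work < budget)
            napi_complete(napi);
        return work;
    }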
@@ -3544,7 +3558,6 @@ bnx2_set_rx_mode(struct net_device *dev)
 	}
 	else {
 		/* Accept one or more multicast(s). */
-		struct dev_mc_list *mclist;
 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
 		u32 regidx;
 		u32 bit;
@@ -3552,8 +3565,8 @@ bnx2_set_rx_mode(struct net_device *dev)
 
 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
 
-		netdev_for_each_mc_addr(mclist, dev) {
-			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev) {
+			crc = ether_crc_le(ETH_ALEN, ha->addr);
 			bit = crc & 0xff;
 			regidx = (bit & 0xe0) >> 5;
 			bit &= 0x1f;
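
This follows the 2.6.34 conversion of the multicast list from struct dev_mc_list to the generic struct netdev_hw_addr; `ha` is assumed to be declared earlier in bnx2_set_rx_mode(), outside this hunk:

    struct netdev_hw_addr *ha;   /* replaces struct dev_mc_list *mclist */

    netdev_for_each_mc_addr(ha, dev)
        crc = ether_crc_le(ETH_ALEN, ha->addr);   /* was mclist->dmi_addr */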
@@ -4757,8 +4770,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 		rc = bnx2_alloc_bad_rbuf(bp);
 	}
 
-	if (bp->flags & BNX2_FLAG_USING_MSIX)
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
 		bnx2_setup_msix_tbl(bp);
+		/* Prevent MSIX table reads and writes from timing out */
+		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+		       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+	}
 
 	return rc;
 }
@@ -5310,7 +5327,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		}
 
 		pci_unmap_single(bp->pdev,
-				 pci_unmap_addr(tx_buf, mapping),
+				 dma_unmap_addr(tx_buf, mapping),
 				 skb_headlen(skb),
 				 PCI_DMA_TODEVICE);
 
@@ -5321,7 +5338,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 		for (k = 0; k < last; k++, j++) {
 			tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
 			pci_unmap_page(bp->pdev,
-				pci_unmap_addr(tx_buf, mapping),
+				dma_unmap_addr(tx_buf, mapping),
 				skb_shinfo(skb)->frags[k].size,
 				PCI_DMA_TODEVICE);
 		}
@@ -5351,7 +5368,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 			continue;
 
 		pci_unmap_single(bp->pdev,
-				 pci_unmap_addr(rx_buf, mapping),
+				 dma_unmap_addr(rx_buf, mapping),
 				 bp->rx_buf_use_size,
 				 PCI_DMA_FROMDEVICE);
 
@@ -5757,11 +5774,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
 	rx_skb = rx_buf->skb;
 
-	rx_hdr = (struct l2_fhdr *) rx_skb->data;
+	rx_hdr = rx_buf->desc;
 	skb_reserve(rx_skb, BNX2_RX_OFFSET);
 
 	pci_dma_sync_single_for_cpu(bp->pdev,
-		pci_unmap_addr(rx_buf, mapping),
+		dma_unmap_addr(rx_buf, mapping),
 		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
@@ -6197,6 +6214,7 @@ bnx2_open(struct net_device *dev)
 	bnx2_disable_int(bp);
 
 	bnx2_setup_int_mode(bp, disable_msi);
+	bnx2_init_napi(bp);
 	bnx2_napi_enable(bp);
 	rc = bnx2_alloc_mem(bp);
 	if (rc)
@@ -6270,12 +6288,12 @@ bnx2_reset_task(struct work_struct *work)
 		return;
 	}
 
-	bnx2_netif_stop(bp);
+	bnx2_netif_stop(bp, true);
 
 	bnx2_init_nic(bp, 1);
 
 	atomic_set(&bp->intr_sem, 1);
-	bnx2_netif_start(bp);
+	bnx2_netif_start(bp, true);
 	rtnl_unlock();
 }
 
@@ -6317,7 +6335,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
 	struct bnx2 *bp = netdev_priv(dev);
 
 	if (netif_running(dev))
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, false);
 
 	bp->vlgrp = vlgrp;
 
@@ -6328,7 +6346,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
 	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
 
-	bnx2_netif_start(bp);
+	bnx2_netif_start(bp, false);
 }
 #endif
 
@@ -6420,7 +6438,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = skb;
-	pci_unmap_addr_set(tx_buf, mapping, mapping);
+	dma_unmap_addr_set(tx_buf, mapping, mapping);
 
 	txbd = &txr->tx_desc_ring[ring_prod];
 
@@ -6445,7 +6463,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			len, PCI_DMA_TODEVICE);
 		if (pci_dma_mapping_error(bp->pdev, mapping))
 			goto dma_error;
-		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
 				   mapping);
 
 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6482,7 +6500,7 @@ dma_error:
 	ring_prod = TX_RING_IDX(prod);
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = NULL;
-	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+	pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
 			 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 	/* unmap remaining mapped pages */
@@ -6490,7 +6508,7 @@ dma_error:
 		prod = NEXT_TX_BD(prod);
 		ring_prod = TX_RING_IDX(prod);
 		tx_buf = &txr->tx_buf_ring[ring_prod];
-		pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+		pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
 			       skb_shinfo(skb)->frags[i].size,
 			       PCI_DMA_TODEVICE);
 	}
@@ -7048,9 +7066,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
 	if (netif_running(bp->dev)) {
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		bnx2_init_nic(bp, 0);
-		bnx2_netif_start(bp);
+		bnx2_netif_start(bp, true);
 	}
 
 	return 0;
@@ -7080,7 +7098,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
 	/* Reset will erase chipset stats; save them */
 	bnx2_save_stats(bp);
 
-	bnx2_netif_stop(bp);
+	bnx2_netif_stop(bp, true);
 	bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
 	bnx2_free_skbs(bp);
 	bnx2_free_mem(bp);
@@ -7108,7 +7126,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
 		bnx2_setup_cnic_irq_info(bp);
 		mutex_unlock(&bp->cnic_lock);
 #endif
-		bnx2_netif_start(bp);
+		bnx2_netif_start(bp, true);
 	}
 	return 0;
 }
@@ -7361,7 +7379,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
 		int i;
 
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
 		bnx2_free_skbs(bp);
 
@@ -7380,7 +7398,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
 			bnx2_shutdown_chip(bp);
 		else {
 			bnx2_init_nic(bp, 1);
-			bnx2_netif_start(bp);
+			bnx2_netif_start(bp, true);
 		}
 
 		/* wait for link up */
@@ -7643,9 +7661,11 @@ poll_bnx2(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < bp->irq_nvecs; i++) {
-		disable_irq(bp->irq_tbl[i].vector);
-		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
-		enable_irq(bp->irq_tbl[i].vector);
+		struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+		disable_irq(irq->vector);
+		irq->handler(irq->vector, &bp->bnx2_napi[i]);
+		enable_irq(irq->vector);
 	}
 }
 #endif
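
With MSI-X, each vector is requested with its own handler, so netpoll must invoke the handler actually registered for the vector instead of hard-coding bnx2_interrupt(). Assumed shape of the per-vector bookkeeping (the real struct bnx2_irq is in bnx2.h):

    struct bnx2_irq {
        irq_handler_t handler;   /* set when the vector is requested */
        unsigned int  vector;
        u8            requested;
        char          name[IFNAMSIZ + 2];
    };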
@@ -8207,7 +8227,7 @@ bnx2_init_napi(struct bnx2 *bp)
 {
 	int i;
 
-	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+	for (i = 0; i < bp->irq_nvecs; i++) {
 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 		int (*poll)(struct napi_struct *, int);
 
@@ -8276,7 +8296,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->ethtool_ops = &bnx2_ethtool_ops;
 
 	bp = netdev_priv(dev);
-	bnx2_init_napi(bp);
 
 	pci_set_drvdata(pdev, dev);
 
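
Together with the bnx2_open() hunk and the loop-bound change above, NAPI registration moves from probe to open, where bnx2_setup_int_mode() has already fixed bp->irq_nvecs, so only the vectors actually in use get a NAPI context. A sketch of the assumed resulting function:

    static void bnx2_init_napi(struct bnx2 *bp)
    {
        int i;

        for (i = 0; i < bp->irq_nvecs; i++) {
            struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
            int (*poll)(struct napi_struct *, int);

            if (i == 0)
                poll = bnx2_poll;        /* vector 0: link, tx and rx */
            else
                poll = bnx2_poll_msix;   /* extra MSI-X rx rings */

            netif_napi_add(bp->dev, &bnapi->napi, poll, 64);
            bnapi->bp = bp;
        }
    }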
@@ -8287,7 +8306,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	memcpy(dev->dev_addr, bp->mac_addr, 6);
 	memcpy(dev->perm_addr, bp->mac_addr, 6);
 
-	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
 	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
 		dev->features |= NETIF_F_IPV6_CSUM;
@@ -8373,7 +8392,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
 		return 0;
 
 	flush_scheduled_work();
-	bnx2_netif_stop(bp);
+	bnx2_netif_stop(bp, true);
 	netif_device_detach(dev);
 	del_timer_sync(&bp->timer);
 	bnx2_shutdown_chip(bp);
@@ -8395,7 +8414,7 @@ bnx2_resume(struct pci_dev *pdev)
 	bnx2_set_power_state(bp, PCI_D0);
 	netif_device_attach(dev);
 	bnx2_init_nic(bp, 1);
-	bnx2_netif_start(bp);
+	bnx2_netif_start(bp, true);
 	return 0;
 }
 
@@ -8422,7 +8441,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
 	}
 
 	if (netif_running(dev)) {
-		bnx2_netif_stop(bp);
+		bnx2_netif_stop(bp, true);
 		del_timer_sync(&bp->timer);
 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
 	}
@@ -8479,7 +8498,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
 
 	rtnl_lock();
 	if (netif_running(dev))
-		bnx2_netif_start(bp, true);
 
 	netif_device_attach(dev);
 	rtnl_unlock();