about summary refs log tree commit diff stats
path: root/drivers/net/bnx2.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r-- drivers/net/bnx2.c | 102
1 files changed, 58 insertions, 44 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ac90a3828f69..188e356c30a3 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,11 +58,11 @@
58#include "bnx2_fw.h" 58#include "bnx2_fw.h"
59 59
60#define DRV_MODULE_NAME "bnx2" 60#define DRV_MODULE_NAME "bnx2"
61#define DRV_MODULE_VERSION "2.0.9" 61#define DRV_MODULE_VERSION "2.0.15"
62#define DRV_MODULE_RELDATE "April 27, 2010" 62#define DRV_MODULE_RELDATE "May 4, 2010"
63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
66#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw" 66#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw" 67#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
68 68
@@ -656,19 +656,11 @@ bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
656 if (stop_cnic) 656 if (stop_cnic)
657 bnx2_cnic_stop(bp); 657 bnx2_cnic_stop(bp);
658 if (netif_running(bp->dev)) { 658 if (netif_running(bp->dev)) {
659 int i;
660
661 bnx2_napi_disable(bp); 659 bnx2_napi_disable(bp);
662 netif_tx_disable(bp->dev); 660 netif_tx_disable(bp->dev);
663 /* prevent tx timeout */
664 for (i = 0; i < bp->dev->num_tx_queues; i++) {
665 struct netdev_queue *txq;
666
667 txq = netdev_get_tx_queue(bp->dev, i);
668 txq->trans_start = jiffies;
669 }
670 } 661 }
671 bnx2_disable_int_sync(bp); 662 bnx2_disable_int_sync(bp);
663 netif_carrier_off(bp->dev); /* prevent tx timeout */
672} 664}
673 665
674static void 666static void
@@ -677,6 +669,10 @@ bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
677 if (atomic_dec_and_test(&bp->intr_sem)) { 669 if (atomic_dec_and_test(&bp->intr_sem)) {
678 if (netif_running(bp->dev)) { 670 if (netif_running(bp->dev)) {
679 netif_tx_wake_all_queues(bp->dev); 671 netif_tx_wake_all_queues(bp->dev);
672 spin_lock_bh(&bp->phy_lock);
673 if (bp->link_up)
674 netif_carrier_on(bp->dev);
675 spin_unlock_bh(&bp->phy_lock);
680 bnx2_napi_enable(bp); 676 bnx2_napi_enable(bp);
681 bnx2_enable_int(bp); 677 bnx2_enable_int(bp);
682 if (start_cnic) 678 if (start_cnic)
@@ -2672,7 +2668,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2672 } 2668 }
2673 2669
2674 rx_pg->page = page; 2670 rx_pg->page = page;
2675 pci_unmap_addr_set(rx_pg, mapping, mapping); 2671 dma_unmap_addr_set(rx_pg, mapping, mapping);
2676 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; 2672 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2677 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; 2673 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2678 return 0; 2674 return 0;
@@ -2687,7 +2683,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2687 if (!page) 2683 if (!page)
2688 return; 2684 return;
2689 2685
2690 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE, 2686 pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2691 PCI_DMA_FROMDEVICE); 2687 PCI_DMA_FROMDEVICE);
2692 2688
2693 __free_page(page); 2689 __free_page(page);
@@ -2719,7 +2715,8 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2719 } 2715 }
2720 2716
2721 rx_buf->skb = skb; 2717 rx_buf->skb = skb;
2722 pci_unmap_addr_set(rx_buf, mapping, mapping); 2718 rx_buf->desc = (struct l2_fhdr *) skb->data;
2719 dma_unmap_addr_set(rx_buf, mapping, mapping);
2723 2720
2724 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; 2721 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2725 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; 2722 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2818,7 +2815,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2818 } 2815 }
2819 } 2816 }
2820 2817
2821 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 2818 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
2822 skb_headlen(skb), PCI_DMA_TODEVICE); 2819 skb_headlen(skb), PCI_DMA_TODEVICE);
2823 2820
2824 tx_buf->skb = NULL; 2821 tx_buf->skb = NULL;
@@ -2828,7 +2825,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2828 sw_cons = NEXT_TX_BD(sw_cons); 2825 sw_cons = NEXT_TX_BD(sw_cons);
2829 2826
2830 pci_unmap_page(bp->pdev, 2827 pci_unmap_page(bp->pdev,
2831 pci_unmap_addr( 2828 dma_unmap_addr(
2832 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], 2829 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2833 mapping), 2830 mapping),
2834 skb_shinfo(skb)->frags[i].size, 2831 skb_shinfo(skb)->frags[i].size,
@@ -2910,8 +2907,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2910 if (prod != cons) { 2907 if (prod != cons) {
2911 prod_rx_pg->page = cons_rx_pg->page; 2908 prod_rx_pg->page = cons_rx_pg->page;
2912 cons_rx_pg->page = NULL; 2909 cons_rx_pg->page = NULL;
2913 pci_unmap_addr_set(prod_rx_pg, mapping, 2910 dma_unmap_addr_set(prod_rx_pg, mapping,
2914 pci_unmap_addr(cons_rx_pg, mapping)); 2911 dma_unmap_addr(cons_rx_pg, mapping));
2915 2912
2916 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; 2913 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2917 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; 2914 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2935,18 +2932,19 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2935 prod_rx_buf = &rxr->rx_buf_ring[prod]; 2932 prod_rx_buf = &rxr->rx_buf_ring[prod];
2936 2933
2937 pci_dma_sync_single_for_device(bp->pdev, 2934 pci_dma_sync_single_for_device(bp->pdev,
2938 pci_unmap_addr(cons_rx_buf, mapping), 2935 dma_unmap_addr(cons_rx_buf, mapping),
2939 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 2936 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2940 2937
2941 rxr->rx_prod_bseq += bp->rx_buf_use_size; 2938 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2942 2939
2943 prod_rx_buf->skb = skb; 2940 prod_rx_buf->skb = skb;
2941 prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2944 2942
2945 if (cons == prod) 2943 if (cons == prod)
2946 return; 2944 return;
2947 2945
2948 pci_unmap_addr_set(prod_rx_buf, mapping, 2946 dma_unmap_addr_set(prod_rx_buf, mapping,
2949 pci_unmap_addr(cons_rx_buf, mapping)); 2947 dma_unmap_addr(cons_rx_buf, mapping));
2950 2948
2951 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2952 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3019,7 +3017,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
3019 /* Don't unmap yet. If we're unable to allocate a new 3017 /* Don't unmap yet. If we're unable to allocate a new
3020 * page, we need to recycle the page and the DMA addr. 3018 * page, we need to recycle the page and the DMA addr.
3021 */ 3019 */
3022 mapping_old = pci_unmap_addr(rx_pg, mapping); 3020 mapping_old = dma_unmap_addr(rx_pg, mapping);
3023 if (i == pages - 1) 3021 if (i == pages - 1)
3024 frag_len -= 4; 3022 frag_len -= 4;
3025 3023
@@ -3074,6 +3072,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3074 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod; 3072 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3075 struct l2_fhdr *rx_hdr; 3073 struct l2_fhdr *rx_hdr;
3076 int rx_pkt = 0, pg_ring_used = 0; 3074 int rx_pkt = 0, pg_ring_used = 0;
3075 struct pci_dev *pdev = bp->pdev;
3077 3076
3078 hw_cons = bnx2_get_hw_rx_cons(bnapi); 3077 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3079 sw_cons = rxr->rx_cons; 3078 sw_cons = rxr->rx_cons;
@@ -3086,7 +3085,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3086 while (sw_cons != hw_cons) { 3085 while (sw_cons != hw_cons) {
3087 unsigned int len, hdr_len; 3086 unsigned int len, hdr_len;
3088 u32 status; 3087 u32 status;
3089 struct sw_bd *rx_buf; 3088 struct sw_bd *rx_buf, *next_rx_buf;
3090 struct sk_buff *skb; 3089 struct sk_buff *skb;
3091 dma_addr_t dma_addr; 3090 dma_addr_t dma_addr;
3092 u16 vtag = 0; 3091 u16 vtag = 0;
@@ -3097,16 +3096,23 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3097 3096
3098 rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; 3097 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3099 skb = rx_buf->skb; 3098 skb = rx_buf->skb;
3099 prefetchw(skb);
3100 3100
3101 if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
3102 next_rx_buf =
3103 &rxr->rx_buf_ring[
3104 RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3105 prefetch(next_rx_buf->desc);
3106 }
3101 rx_buf->skb = NULL; 3107 rx_buf->skb = NULL;
3102 3108
3103 dma_addr = pci_unmap_addr(rx_buf, mapping); 3109 dma_addr = dma_unmap_addr(rx_buf, mapping);
3104 3110
3105 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, 3111 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3106 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, 3112 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3107 PCI_DMA_FROMDEVICE); 3113 PCI_DMA_FROMDEVICE);
3108 3114
3109 rx_hdr = (struct l2_fhdr *) skb->data; 3115 rx_hdr = rx_buf->desc;
3110 len = rx_hdr->l2_fhdr_pkt_len; 3116 len = rx_hdr->l2_fhdr_pkt_len;
3111 status = rx_hdr->l2_fhdr_status; 3117 status = rx_hdr->l2_fhdr_status;
3112 3118
@@ -3207,10 +3213,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3207 3213
3208#ifdef BCM_VLAN 3214#ifdef BCM_VLAN
3209 if (hw_vlan) 3215 if (hw_vlan)
3210 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); 3216 vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
3211 else 3217 else
3212#endif 3218#endif
3213 netif_receive_skb(skb); 3219 napi_gro_receive(&bnapi->napi, skb);
3214 3220
3215 rx_pkt++; 3221 rx_pkt++;
3216 3222
@@ -3548,7 +3554,6 @@ bnx2_set_rx_mode(struct net_device *dev)
3548 } 3554 }
3549 else { 3555 else {
3550 /* Accept one or more multicast(s). */ 3556 /* Accept one or more multicast(s). */
3551 struct dev_mc_list *mclist;
3552 u32 mc_filter[NUM_MC_HASH_REGISTERS]; 3557 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3553 u32 regidx; 3558 u32 regidx;
3554 u32 bit; 3559 u32 bit;
@@ -3556,8 +3561,8 @@ bnx2_set_rx_mode(struct net_device *dev)
3556 3561
3557 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); 3562 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3558 3563
3559 netdev_for_each_mc_addr(mclist, dev) { 3564 netdev_for_each_mc_addr(ha, dev) {
3560 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr); 3565 crc = ether_crc_le(ETH_ALEN, ha->addr);
3561 bit = crc & 0xff; 3566 bit = crc & 0xff;
3562 regidx = (bit & 0xe0) >> 5; 3567 regidx = (bit & 0xe0) >> 5;
3563 bit &= 0x1f; 3568 bit &= 0x1f;
@@ -5318,7 +5323,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5318 } 5323 }
5319 5324
5320 pci_unmap_single(bp->pdev, 5325 pci_unmap_single(bp->pdev,
5321 pci_unmap_addr(tx_buf, mapping), 5326 dma_unmap_addr(tx_buf, mapping),
5322 skb_headlen(skb), 5327 skb_headlen(skb),
5323 PCI_DMA_TODEVICE); 5328 PCI_DMA_TODEVICE);
5324 5329
@@ -5329,7 +5334,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5329 for (k = 0; k < last; k++, j++) { 5334 for (k = 0; k < last; k++, j++) {
5330 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; 5335 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5331 pci_unmap_page(bp->pdev, 5336 pci_unmap_page(bp->pdev,
5332 pci_unmap_addr(tx_buf, mapping), 5337 dma_unmap_addr(tx_buf, mapping),
5333 skb_shinfo(skb)->frags[k].size, 5338 skb_shinfo(skb)->frags[k].size,
5334 PCI_DMA_TODEVICE); 5339 PCI_DMA_TODEVICE);
5335 } 5340 }
@@ -5359,7 +5364,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
5359 continue; 5364 continue;
5360 5365
5361 pci_unmap_single(bp->pdev, 5366 pci_unmap_single(bp->pdev,
5362 pci_unmap_addr(rx_buf, mapping), 5367 dma_unmap_addr(rx_buf, mapping),
5363 bp->rx_buf_use_size, 5368 bp->rx_buf_use_size,
5364 PCI_DMA_FROMDEVICE); 5369 PCI_DMA_FROMDEVICE);
5365 5370
@@ -5765,11 +5770,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5765 rx_buf = &rxr->rx_buf_ring[rx_start_idx]; 5770 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5766 rx_skb = rx_buf->skb; 5771 rx_skb = rx_buf->skb;
5767 5772
5768 rx_hdr = (struct l2_fhdr *) rx_skb->data; 5773 rx_hdr = rx_buf->desc;
5769 skb_reserve(rx_skb, BNX2_RX_OFFSET); 5774 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5770 5775
5771 pci_dma_sync_single_for_cpu(bp->pdev, 5776 pci_dma_sync_single_for_cpu(bp->pdev,
5772 pci_unmap_addr(rx_buf, mapping), 5777 dma_unmap_addr(rx_buf, mapping),
5773 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 5778 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5774 5779
5775 if (rx_hdr->l2_fhdr_status & 5780 if (rx_hdr->l2_fhdr_status &
@@ -6292,14 +6297,23 @@ static void
6292bnx2_dump_state(struct bnx2 *bp) 6297bnx2_dump_state(struct bnx2 *bp)
6293{ 6298{
6294 struct net_device *dev = bp->dev; 6299 struct net_device *dev = bp->dev;
6300 u32 mcp_p0, mcp_p1;
6295 6301
6296 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem)); 6302 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6297 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n", 6303 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6298 REG_RD(bp, BNX2_EMAC_TX_STATUS), 6304 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6305 REG_RD(bp, BNX2_EMAC_RX_STATUS));
6306 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6299 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); 6307 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6308 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6309 mcp_p0 = BNX2_MCP_STATE_P0;
6310 mcp_p1 = BNX2_MCP_STATE_P1;
6311 } else {
6312 mcp_p0 = BNX2_MCP_STATE_P0_5708;
6313 mcp_p1 = BNX2_MCP_STATE_P1_5708;
6314 }
6300 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n", 6315 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6301 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0), 6316 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
6302 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6303 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n", 6317 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6304 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); 6318 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6305 if (bp->flags & BNX2_FLAG_USING_MSIX) 6319 if (bp->flags & BNX2_FLAG_USING_MSIX)
@@ -6429,7 +6443,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6429 6443
6430 tx_buf = &txr->tx_buf_ring[ring_prod]; 6444 tx_buf = &txr->tx_buf_ring[ring_prod];
6431 tx_buf->skb = skb; 6445 tx_buf->skb = skb;
6432 pci_unmap_addr_set(tx_buf, mapping, mapping); 6446 dma_unmap_addr_set(tx_buf, mapping, mapping);
6433 6447
6434 txbd = &txr->tx_desc_ring[ring_prod]; 6448 txbd = &txr->tx_desc_ring[ring_prod];
6435 6449
@@ -6454,7 +6468,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6454 len, PCI_DMA_TODEVICE); 6468 len, PCI_DMA_TODEVICE);
6455 if (pci_dma_mapping_error(bp->pdev, mapping)) 6469 if (pci_dma_mapping_error(bp->pdev, mapping))
6456 goto dma_error; 6470 goto dma_error;
6457 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, 6471 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6458 mapping); 6472 mapping);
6459 6473
6460 txbd->tx_bd_haddr_hi = (u64) mapping >> 32; 6474 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6491,7 +6505,7 @@ dma_error:
6491 ring_prod = TX_RING_IDX(prod); 6505 ring_prod = TX_RING_IDX(prod);
6492 tx_buf = &txr->tx_buf_ring[ring_prod]; 6506 tx_buf = &txr->tx_buf_ring[ring_prod];
6493 tx_buf->skb = NULL; 6507 tx_buf->skb = NULL;
6494 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 6508 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6495 skb_headlen(skb), PCI_DMA_TODEVICE); 6509 skb_headlen(skb), PCI_DMA_TODEVICE);
6496 6510
6497 /* unmap remaining mapped pages */ 6511 /* unmap remaining mapped pages */
@@ -6499,7 +6513,7 @@ dma_error:
6499 prod = NEXT_TX_BD(prod); 6513 prod = NEXT_TX_BD(prod);
6500 ring_prod = TX_RING_IDX(prod); 6514 ring_prod = TX_RING_IDX(prod);
6501 tx_buf = &txr->tx_buf_ring[ring_prod]; 6515 tx_buf = &txr->tx_buf_ring[ring_prod];
6502 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping), 6516 pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6503 skb_shinfo(skb)->frags[i].size, 6517 skb_shinfo(skb)->frags[i].size,
6504 PCI_DMA_TODEVICE); 6518 PCI_DMA_TODEVICE);
6505 } 6519 }
@@ -8297,7 +8311,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8297 memcpy(dev->dev_addr, bp->mac_addr, 6); 8311 memcpy(dev->dev_addr, bp->mac_addr, 6);
8298 memcpy(dev->perm_addr, bp->mac_addr, 6); 8312 memcpy(dev->perm_addr, bp->mac_addr, 6);
8299 8313
8300 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 8314 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
8301 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG); 8315 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8302 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8316 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8303 dev->features |= NETIF_F_IPV6_CSUM; 8317 dev->features |= NETIF_F_IPV6_CSUM;