Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r--  drivers/net/bnx2.c | 59
1 file changed, 31 insertions(+), 28 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 381887ba677c..53326fed6c81 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -246,6 +246,8 @@ static const struct flash_spec flash_5709 = {
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
+static void bnx2_init_napi(struct bnx2 *bp);
+
 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
 	u32 diff;
@@ -2668,7 +2670,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 	}
 
 	rx_pg->page = page;
-	pci_unmap_addr_set(rx_pg, mapping, mapping);
+	dma_unmap_addr_set(rx_pg, mapping, mapping);
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
 	return 0;
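
This hunk (and most of the rest of the diff) migrates from the PCI-specific pci_unmap_addr helpers to the generic DMA-API equivalents from <linux/dma-mapping.h>. A minimal sketch of how the generic macros are used, with a hypothetical ring-entry struct standing in for bnx2's own software ring descriptors:

	#include <linux/dma-mapping.h>

	struct sw_ring_entry {                  /* hypothetical stand-in */
		struct page *page;
		DEFINE_DMA_UNMAP_ADDR(mapping); /* a dma_addr_t member, or
						 * nothing at all when the
						 * arch needs no unmap state,
						 * so the field can be free */
	};

	static void sw_ring_entry_demo(struct sw_ring_entry *e, dma_addr_t addr)
	{
		dma_unmap_addr_set(e, mapping, addr); /* save at map time  */
		addr = dma_unmap_addr(e, mapping);    /* recall for unmap  */
	}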
@@ -2683,7 +2685,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 	if (!page)
 		return;
 
-	pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
+	pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
 		       PCI_DMA_FROMDEVICE);
 
 	__free_page(page);
@@ -2715,7 +2717,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 	}
 
 	rx_buf->skb = skb;
-	pci_unmap_addr_set(rx_buf, mapping, mapping);
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2814,7 +2816,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			}
 		}
 
-		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
 				 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 		tx_buf->skb = NULL;
@@ -2824,7 +2826,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			sw_cons = NEXT_TX_BD(sw_cons);
 
 			pci_unmap_page(bp->pdev,
-				pci_unmap_addr(
+				dma_unmap_addr(
 					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
 					mapping),
 				skb_shinfo(skb)->frags[i].size,
@@ -2906,8 +2908,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 		if (prod != cons) {
 			prod_rx_pg->page = cons_rx_pg->page;
 			cons_rx_pg->page = NULL;
-			pci_unmap_addr_set(prod_rx_pg, mapping,
-				pci_unmap_addr(cons_rx_pg, mapping));
+			dma_unmap_addr_set(prod_rx_pg, mapping,
+				dma_unmap_addr(cons_rx_pg, mapping));
 
 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2931,7 +2933,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 	prod_rx_buf = &rxr->rx_buf_ring[prod];
 
 	pci_dma_sync_single_for_device(bp->pdev,
-		pci_unmap_addr(cons_rx_buf, mapping),
+		dma_unmap_addr(cons_rx_buf, mapping),
 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
@@ -2941,8 +2943,8 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 	if (cons == prod)
 		return;
 
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-			pci_unmap_addr(cons_rx_buf, mapping));
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			dma_unmap_addr(cons_rx_buf, mapping));
 
 	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3015,7 +3017,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 		/* Don't unmap yet.  If we're unable to allocate a new
 		 * page, we need to recycle the page and the DMA addr.
 		 */
-		mapping_old = pci_unmap_addr(rx_pg, mapping);
+		mapping_old = dma_unmap_addr(rx_pg, mapping);
 		if (i == pages - 1)
 			frag_len -= 4;
 
@@ -3096,7 +3098,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 		rx_buf->skb = NULL;
 
-		dma_addr = pci_unmap_addr(rx_buf, mapping);
+		dma_addr = dma_unmap_addr(rx_buf, mapping);
 
 		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
@@ -3544,7 +3546,6 @@ bnx2_set_rx_mode(struct net_device *dev)
 	}
 	else {
 		/* Accept one or more multicast(s). */
-		struct dev_mc_list *mclist;
 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
 		u32 regidx;
 		u32 bit;
@@ -3552,8 +3553,8 @@ bnx2_set_rx_mode(struct net_device *dev)
 
 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
 
-		netdev_for_each_mc_addr(mclist, dev) {
-			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev) {
+			crc = ether_crc_le(ETH_ALEN, ha->addr);
 			bit = crc & 0xff;
 			regidx = (bit & 0xe0) >> 5;
 			bit &= 0x1f;
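
These two hunks track the removal of struct dev_mc_list: the multicast walk now iterates struct netdev_hw_addr entries, and the old mclist->dmi_addr becomes ha->addr. A sketch of the new loop shape, assuming the `ha` iterator is declared earlier in bnx2_set_rx_mode() (its declaration falls outside this hunk):

	struct netdev_hw_addr *ha;
	u32 crc;

	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(ETH_ALEN, ha->addr); /* 6-byte MAC */
		/* low 8 CRC bits select one bit across the
		 * NUM_MC_HASH_REGISTERS 32-bit filter words */
	}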
@@ -5310,7 +5311,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 			}
 
 			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(tx_buf, mapping),
+					 dma_unmap_addr(tx_buf, mapping),
 					 skb_headlen(skb),
 					 PCI_DMA_TODEVICE);
 
@@ -5321,7 +5322,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
 			for (k = 0; k < last; k++, j++) {
 				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
 				pci_unmap_page(bp->pdev,
-					pci_unmap_addr(tx_buf, mapping),
+					dma_unmap_addr(tx_buf, mapping),
 					skb_shinfo(skb)->frags[k].size,
 					PCI_DMA_TODEVICE);
 			}
@@ -5351,7 +5352,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 				continue;
 
 			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
+					 dma_unmap_addr(rx_buf, mapping),
 					 bp->rx_buf_use_size,
 					 PCI_DMA_FROMDEVICE);
 
@@ -5761,7 +5762,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	skb_reserve(rx_skb, BNX2_RX_OFFSET);
 
 	pci_dma_sync_single_for_cpu(bp->pdev,
-		pci_unmap_addr(rx_buf, mapping),
+		dma_unmap_addr(rx_buf, mapping),
 		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
 	if (rx_hdr->l2_fhdr_status &
@@ -6197,6 +6198,7 @@ bnx2_open(struct net_device *dev)
 	bnx2_disable_int(bp);
 
 	bnx2_setup_int_mode(bp, disable_msi);
+	bnx2_init_napi(bp);
 	bnx2_napi_enable(bp);
 	rc = bnx2_alloc_mem(bp);
 	if (rc)
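
Calling bnx2_init_napi() here, after bnx2_setup_int_mode(), means bp->irq_nvecs is already final when the per-vector NAPI contexts are registered (bnx2_init_napi() now loops over bp->irq_nvecs, per the hunk near the end of this diff). A sketch of the resulting bring-up order in bnx2_open():

	bnx2_setup_int_mode(bp, disable_msi); /* settles bp->irq_nvecs      */
	bnx2_init_napi(bp);                   /* register NAPI per vector   */
	bnx2_napi_enable(bp);                 /* napi_enable() the same set */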
@@ -6420,7 +6422,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = skb;
-	pci_unmap_addr_set(tx_buf, mapping, mapping);
+	dma_unmap_addr_set(tx_buf, mapping, mapping);
 
 	txbd = &txr->tx_desc_ring[ring_prod];
 
@@ -6445,7 +6447,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			len, PCI_DMA_TODEVICE);
 		if (pci_dma_mapping_error(bp->pdev, mapping))
 			goto dma_error;
-		pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
 				   mapping);
 
 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6482,7 +6484,7 @@ dma_error:
 	ring_prod = TX_RING_IDX(prod);
 	tx_buf = &txr->tx_buf_ring[ring_prod];
 	tx_buf->skb = NULL;
-	pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+	pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
 			 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 	/* unmap remaining mapped pages */
@@ -6490,7 +6492,7 @@ dma_error:
 		prod = NEXT_TX_BD(prod);
 		ring_prod = TX_RING_IDX(prod);
 		tx_buf = &txr->tx_buf_ring[ring_prod];
-		pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+		pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
 			       skb_shinfo(skb)->frags[i].size,
 			       PCI_DMA_TODEVICE);
 	}
@@ -7643,9 +7645,11 @@ poll_bnx2(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < bp->irq_nvecs; i++) {
-		disable_irq(bp->irq_tbl[i].vector);
-		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
-		enable_irq(bp->irq_tbl[i].vector);
+		struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+		disable_irq(irq->vector);
+		irq->handler(irq->vector, &bp->bnx2_napi[i]);
+		enable_irq(irq->vector);
 	}
 }
 #endif
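
Dispatching through irq->handler instead of hard-coding bnx2_interrupt() makes the netpoll path invoke whatever handler was actually registered for each vector (bnx2_msi, bnx2_msi_1shot, or bnx2_interrupt, depending on interrupt mode). A sketch of the per-vector bookkeeping this relies on, assuming the bnx2_irq layout in bnx2.h of this era:

	struct bnx2_irq {
		irq_handler_t handler;  /* what request_irq() was given */
		unsigned int  vector;
		u8            requested;
		char          name[IFNAMSIZ + 2];
	};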
@@ -8207,7 +8211,7 @@ bnx2_init_napi(struct bnx2 *bp)
 {
 	int i;
 
-	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+	for (i = 0; i < bp->irq_nvecs; i++) {
 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 		int (*poll)(struct napi_struct *, int);
 
@@ -8276,7 +8280,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->ethtool_ops = &bnx2_ethtool_ops;
 
 	bp = netdev_priv(dev);
-	bnx2_init_napi(bp);
 
 	pci_set_drvdata(pdev, dev);
 