author		Michael Chan <mchan@broadcom.com>	2006-03-20 20:49:02 -0500
committer	David S. Miller <davem@davemloft.net>	2006-03-20 20:49:02 -0500
commit		236b6394bb49ea58465c6f935a286d2342576f8d (patch)
tree		e00c64b5dcb909e3e700021f6c2368af55f0641c /drivers/net
parent		244ac4f446ac6a19caf5eb692c4844f29e6478bf (diff)
[BNX2]: Fix bug when rx ring is full
Fix the rx code path that does not handle a full rx ring correctly.
When the rx ring is set to the maximum size (255), the consumer and
producer indices are the same when an rx packet is completed. Fix
the rx code to handle this condition properly.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
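
The condition described in the commit message is easiest to see in the ring-index
arithmetic. Below is a minimal, standalone C sketch of why a completely full
255-entry ring leaves the wrapped consumer and producer indices equal; RING_SIZE
and ring_idx() are illustrative stand-ins, not the driver's own macros.

#include <stdio.h>

#define RING_SIZE 255			/* maximum rx ring size from the commit message */

/* hypothetical helper: wrap a free-running producer/consumer count onto the ring */
static unsigned ring_idx(unsigned idx)
{
	return idx % RING_SIZE;
}

int main(void)
{
	unsigned sw_cons = 0;			/* next descriptor to be completed */
	unsigned sw_prod = RING_SIZE;		/* ring fully posted: producer one lap ahead */

	/* With every slot filled, the wrapped indices coincide ... */
	printf("cons=%u prod=%u\n", ring_idx(sw_cons), ring_idx(sw_prod));

	/* ... so a reuse path that unconditionally copies "cons" descriptor state
	 * into the "prod" slot is copying a slot onto itself.  The patched
	 * bnx2_reuse_rx_skb() bails out early when the two indices are equal. */
	if (ring_idx(sw_cons) == ring_idx(sw_prod))
		printf("full ring: cons == prod, skip the descriptor copy\n");

	return 0;
}

Running the sketch prints cons=0 prod=0 followed by the full-ring message, which is
exactly the case the patched reuse path now returns from early.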
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/bnx2.c | 41
1 file changed, 25 insertions, 16 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 6fbb5486163d..0d592f7c3a99 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1656,23 +1656,30 @@ static inline void
 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
 	u16 cons, u16 prod)
 {
-	struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons];
-	struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod];
-	struct rx_bd *cons_bd = &bp->rx_desc_ring[cons];
-	struct rx_bd *prod_bd = &bp->rx_desc_ring[prod];
+	struct sw_bd *cons_rx_buf, *prod_rx_buf;
+	struct rx_bd *cons_bd, *prod_bd;
+
+	cons_rx_buf = &bp->rx_buf_ring[cons];
+	prod_rx_buf = &bp->rx_buf_ring[prod];
 
 	pci_dma_sync_single_for_device(bp->pdev,
 		pci_unmap_addr(cons_rx_buf, mapping),
 		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
-	prod_rx_buf->skb = cons_rx_buf->skb;
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-		pci_unmap_addr(cons_rx_buf, mapping));
+	bp->rx_prod_bseq += bp->rx_buf_use_size;
 
-	memcpy(prod_bd, cons_bd, 8);
+	prod_rx_buf->skb = skb;
 
-	bp->rx_prod_bseq += bp->rx_buf_use_size;
+	if (cons == prod)
+		return;
 
+	pci_unmap_addr_set(prod_rx_buf, mapping,
+		pci_unmap_addr(cons_rx_buf, mapping));
+
+	cons_bd = &bp->rx_desc_ring[cons];
+	prod_bd = &bp->rx_desc_ring[prod];
+	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
 static int
@@ -1699,14 +1706,19 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 		u32 status;
 		struct sw_bd *rx_buf;
 		struct sk_buff *skb;
+		dma_addr_t dma_addr;
 
 		sw_ring_cons = RX_RING_IDX(sw_cons);
 		sw_ring_prod = RX_RING_IDX(sw_prod);
 
 		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
 		skb = rx_buf->skb;
-		pci_dma_sync_single_for_cpu(bp->pdev,
-			pci_unmap_addr(rx_buf, mapping),
+
+		rx_buf->skb = NULL;
+
+		dma_addr = pci_unmap_addr(rx_buf, mapping);
+
+		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
 			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
 		rx_hdr = (struct l2_fhdr *) skb->data;
@@ -1747,8 +1759,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 				skb = new_skb;
 			}
 			else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
-				pci_unmap_single(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
+				pci_unmap_single(bp->pdev, dma_addr,
 					bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
 				skb_reserve(skb, bp->rx_offset);
@@ -1794,8 +1805,6 @@ reuse_rx:
 		rx_pkt++;
 
 next_rx:
-		rx_buf->skb = NULL;
-
 		sw_cons = NEXT_RX_BD(sw_cons);
 		sw_prod = NEXT_RX_BD(sw_prod);
 
@@ -3360,7 +3369,7 @@ bnx2_init_rx_ring(struct bnx2 *bp)
 	val = (u64) bp->rx_desc_mapping & 0xffffffff;
 	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
 
-	for ( ;ring_prod < bp->rx_ring_size; ) {
+	for (i = 0; i < bp->rx_ring_size; i++) {
 		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
 			break;
 		}
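
The last hunk switches the rx-ring fill loop from an index-bounded form to a
count-bounded one: a count-bounded loop posts exactly bp->rx_ring_size buffers no
matter where the wrapped producer index lands. A rough standalone sketch of that
shape, assuming a simple modulo wrap in place of the driver's NEXT_RX_BD() and
RX_RING_IDX() macros (RING_SIZE and next_idx() are hypothetical):

#include <stdio.h>

#define RING_SIZE 255			/* maximum rx ring size */

/* hypothetical wrap helper, standing in for NEXT_RX_BD()/RX_RING_IDX() */
static unsigned next_idx(unsigned idx)
{
	return (idx + 1) % RING_SIZE;
}

int main(void)
{
	unsigned ring_prod = 0, posted = 0, i;

	/* Count-bounded fill: post exactly RING_SIZE buffers regardless of
	 * how the ring index wraps.  An index-bounded test such as
	 * "ring_prod < RING_SIZE" remains true after the index wraps back
	 * to 0, so it cannot serve as the termination condition when the
	 * ring is filled completely. */
	for (i = 0; i < RING_SIZE; i++) {
		/* a real driver would allocate a buffer for slot ring_prod here */
		posted++;
		ring_prod = next_idx(ring_prod);
	}

	printf("posted %u buffers, producer index wrapped back to %u\n",
	       posted, ring_prod);
	return 0;
}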