author		Ayaz Abdulla <aabdulla@nvidia.com>	2007-01-21 18:10:47 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-05 16:58:48 -0500
commit		445583b89d71b48cf8c64e26acc5a710248feed7 (patch)
tree		b33a5c8d9d2773d2819a4f89c048484c8d908b09 /drivers
parent		aaa37d2d099f97ced415546e285ac901e47a2437 (diff)
forcedeth: tx data path optimization
This patch optimizes the tx data paths and cleans up the code: vlan handling
is removed from the desc1/2 paths (the vlan tag is only valid for desc3),
rarely taken branches are annotated with unlikely(), and the transmit
completion loops are restructured to make the code easier to read.
Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
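Two idioms recur throughout the diff below: a GCC-style __builtin_expect() ("unlikely") hint on branches that rarely fire, and a post-increment wrap test on every ring advance. The following minimal, self-contained C sketch shows both in isolation; the ring/entry types here are simplified stand-ins for illustration, not the driver's real ring_desc/fe_priv structures.

	#include <stdio.h>

	/* branch hint as used in the kernel: condition is expected to be false */
	#define unlikely(x) __builtin_expect(!!(x), 0)

	struct entry { int data; };

	struct ring {
		struct entry slots[4];
		struct entry *first;	/* first valid slot */
		struct entry *last;	/* last valid slot */
		struct entry *put;	/* next slot to fill */
	};

	static void ring_put(struct ring *r, int data)
	{
		r->put->data = data;
		/* post-increment wrap test, mirroring
		 * "if (unlikely(put_tx++ == np->last_tx.orig))" */
		if (unlikely(r->put++ == r->last))
			r->put = r->first;
	}

	int main(void)
	{
		struct ring r;
		r.first = &r.slots[0];
		r.last = &r.slots[3];
		r.put = r.first;
		for (int i = 0; i < 6; i++)
			ring_put(&r, i);	/* wraps after slot 3 */
		for (int i = 0; i < 4; i++)
			printf("slot %d = %d\n", i, r.slots[i].data);
		return 0;
	}

The hint lets the compiler lay out the wrap path out of line, so the common no-wrap case falls straight through without a taken branch.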
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/forcedeth.c	115
1 file changed, 58 insertions(+), 57 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 75906ade76f5..27b6bf846000 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1563,7 +1563,6 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 size = skb->len-skb->data_len;
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	u32 empty_slots;
-	u32 tx_flags_vlan = 0;
 	struct ring_desc* put_tx;
 	struct ring_desc* start_tx;
 	struct ring_desc* prev_tx;
@@ -1576,7 +1575,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}

 	empty_slots = nv_get_empty_tx_slots(np);
-	if (empty_slots <= entries) {
+	if (unlikely(empty_slots <= entries)) {
 		spin_lock_irq(&np->lock);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
@@ -1596,12 +1595,13 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		np->put_tx_ctx->dma_len = bcnt;
 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+
 		tx_flags = np->tx_flags;
 		offset += bcnt;
 		size -= bcnt;
-		if (put_tx++ == np->last_tx.orig)
+		if (unlikely(put_tx++ == np->last_tx.orig))
 			put_tx = np->first_tx.orig;
-		if (np->put_tx_ctx++ == np->last_tx_ctx)
+		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 			np->put_tx_ctx = np->first_tx_ctx;
 	} while (size);

@@ -1618,14 +1618,14 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
 							   PCI_DMA_TODEVICE);
 			np->put_tx_ctx->dma_len = bcnt;
-
 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+
 			offset += bcnt;
 			size -= bcnt;
-			if (put_tx++ == np->last_tx.orig)
+			if (unlikely(put_tx++ == np->last_tx.orig))
 				put_tx = np->first_tx.orig;
-			if (np->put_tx_ctx++ == np->last_tx_ctx)
+			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 				np->put_tx_ctx = np->first_tx_ctx;
 		} while (size);
 	}
@@ -1642,11 +1642,6 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

-	/* vlan tag */
-	if (np->vlangrp && vlan_tx_tag_present(skb)) {
-		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
-	}
-
 	spin_lock_irq(&np->lock);

 	/* set tx flags */
@@ -1669,7 +1664,6 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)

 	dev->trans_start = jiffies;
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-	pci_push(get_hwbase(dev));
 	return NETDEV_TX_OK;
 }

@@ -1677,7 +1671,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 tx_flags = 0;
-	u32 tx_flags_extra = NV_TX2_LASTPACKET;
+	u32 tx_flags_extra;
 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
 	unsigned int i;
 	u32 offset = 0;
@@ -1685,7 +1679,6 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 	u32 size = skb->len-skb->data_len;
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	u32 empty_slots;
-	u32 tx_flags_vlan = 0;
 	struct ring_desc_ex* put_tx;
 	struct ring_desc_ex* start_tx;
 	struct ring_desc_ex* prev_tx;
@@ -1698,7 +1691,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 	}

 	empty_slots = nv_get_empty_tx_slots(np);
-	if (empty_slots <= entries) {
+	if (unlikely(empty_slots <= entries)) {
 		spin_lock_irq(&np->lock);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
@@ -1719,12 +1712,13 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 		put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
 		put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
-		tx_flags = np->tx_flags;
+
+		tx_flags = NV_TX2_VALID;
 		offset += bcnt;
 		size -= bcnt;
-		if (put_tx++ == np->last_tx.ex)
+		if (unlikely(put_tx++ == np->last_tx.ex))
 			put_tx = np->first_tx.ex;
-		if (np->put_tx_ctx++ == np->last_tx_ctx)
+		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 			np->put_tx_ctx = np->first_tx_ctx;
 	} while (size);

@@ -1741,21 +1735,21 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
 							   PCI_DMA_TODEVICE);
 			np->put_tx_ctx->dma_len = bcnt;
-
 			put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
 			put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
+
 			offset += bcnt;
 			size -= bcnt;
-			if (put_tx++ == np->last_tx.ex)
+			if (unlikely(put_tx++ == np->last_tx.ex))
 				put_tx = np->first_tx.ex;
-			if (np->put_tx_ctx++ == np->last_tx_ctx)
+			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 				np->put_tx_ctx = np->first_tx_ctx;
 		} while (size);
 	}

 	/* set last fragment flag */
-	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
+	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

 	/* save skb in this slot's context area */
 	prev_tx_ctx->skb = skb;
@@ -1767,14 +1761,18 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

 	/* vlan tag */
-	if (np->vlangrp && vlan_tx_tag_present(skb)) {
-		tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
+	if (likely(!np->vlangrp)) {
+		start_tx->txvlan = 0;
+	} else {
+		if (vlan_tx_tag_present(skb))
+			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
+		else
+			start_tx->txvlan = 0;
 	}

 	spin_lock_irq(&np->lock);

 	/* set tx flags */
-	start_tx->txvlan = cpu_to_le32(tx_flags_vlan);
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	np->put_tx.ex = put_tx;

@@ -1794,7 +1792,6 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)

 	dev->trans_start = jiffies;
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-	pci_push(get_hwbase(dev));
 	return NETDEV_TX_OK;
 }

@@ -1807,21 +1804,22 @@ static void nv_tx_done(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
-	struct sk_buff *skb;
 	struct ring_desc* orig_get_tx = np->get_tx.orig;

-	while (np->get_tx.orig != np->put_tx.orig) {
-		flags = le32_to_cpu(np->get_tx.orig->flaglen);
+	while ((np->get_tx.orig != np->put_tx.orig) &&
+	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {

 		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
 					dev->name, flags);
-		if (flags & NV_TX_VALID)
-			break;
+
+		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
+			       np->get_tx_ctx->dma_len,
+			       PCI_DMA_TODEVICE);
+		np->get_tx_ctx->dma = 0;
+
 		if (np->desc_ver == DESC_VER_1) {
 			if (flags & NV_TX_LASTPACKET) {
-				skb = np->get_tx_ctx->skb;
-				if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
-					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
+				if (flags & NV_TX_ERROR) {
 					if (flags & NV_TX_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
 					if (flags & NV_TX_CARRIERLOST)
@@ -1829,14 +1827,14 @@ static void nv_tx_done(struct net_device *dev)
 					np->stats.tx_errors++;
 				} else {
 					np->stats.tx_packets++;
-					np->stats.tx_bytes += skb->len;
+					np->stats.tx_bytes += np->get_tx_ctx->skb->len;
 				}
+				dev_kfree_skb_any(np->get_tx_ctx->skb);
+				np->get_tx_ctx->skb = NULL;
 			}
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
-				skb = np->get_tx_ctx->skb;
-				if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
-					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
+				if (flags & NV_TX2_ERROR) {
 					if (flags & NV_TX2_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
 					if (flags & NV_TX2_CARRIERLOST)
@@ -1844,17 +1842,18 @@ static void nv_tx_done(struct net_device *dev)
 					np->stats.tx_errors++;
 				} else {
 					np->stats.tx_packets++;
-					np->stats.tx_bytes += skb->len;
+					np->stats.tx_bytes += np->get_tx_ctx->skb->len;
 				}
+				dev_kfree_skb_any(np->get_tx_ctx->skb);
+				np->get_tx_ctx->skb = NULL;
 			}
 		}
-		nv_release_txskb(dev, np->get_tx_ctx);
-		if (np->get_tx.orig++ == np->last_tx.orig)
+		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
 			np->get_tx.orig = np->first_tx.orig;
-		if (np->get_tx_ctx++ == np->last_tx_ctx)
+		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
 			np->get_tx_ctx = np->first_tx_ctx;
 	}
-	if ((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx)) {
+	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
 	}
@@ -1864,20 +1863,21 @@ static void nv_tx_done_optimized(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
-	struct sk_buff *skb;
 	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

-	while (np->get_tx.ex == np->put_tx.ex) {
-		flags = le32_to_cpu(np->get_tx.ex->flaglen);
+	while ((np->get_tx.ex != np->put_tx.ex) &&
+	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID)) {

 		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
 					dev->name, flags);
-		if (flags & NV_TX_VALID)
-			break;
+
+		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
+			       np->get_tx_ctx->dma_len,
+			       PCI_DMA_TODEVICE);
+		np->get_tx_ctx->dma = 0;
+
 		if (flags & NV_TX2_LASTPACKET) {
-			skb = np->get_tx_ctx->skb;
-			if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
-				     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
+			if (flags & NV_TX2_ERROR) {
 				if (flags & NV_TX2_UNDERFLOW)
 					np->stats.tx_fifo_errors++;
 				if (flags & NV_TX2_CARRIERLOST)
@@ -1885,16 +1885,17 @@ static void nv_tx_done_optimized(struct net_device *dev)
 				np->stats.tx_errors++;
 			} else {
 				np->stats.tx_packets++;
-				np->stats.tx_bytes += skb->len;
+				np->stats.tx_bytes += np->get_tx_ctx->skb->len;
 			}
+			dev_kfree_skb_any(np->get_tx_ctx->skb);
+			np->get_tx_ctx->skb = NULL;
 		}
-		nv_release_txskb(dev, np->get_tx_ctx);
-		if (np->get_tx.ex++ == np->last_tx.ex)
+		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
 			np->get_tx.ex = np->first_tx.ex;
-		if (np->get_tx_ctx++ == np->last_tx_ctx)
+		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
 			np->get_tx_ctx = np->first_tx_ctx;
 	}
-	if ((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx)) {
+	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
 		np->tx_stop = 0;
 		netif_wake_queue(dev);
 	}