Diffstat (limited to 'drivers/net/ethernet/via/via-rhine.c')
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 29 ++++++++++++++++++++---------
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 17e276651601..8fb807ea1caa 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -70,12 +70,14 @@ static const int multicast_filter_limit = 32;
 /* Operational parameters that are set at compile time. */
 
 /* Keep the ring sizes a power of two for compile efficiency.
-   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
-   Making the Tx ring too large decreases the effectiveness of channel
-   bonding and packet priority.
-   There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE	16
-#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
+ * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ * Making the Tx ring too large decreases the effectiveness of channel
+ * bonding and packet priority.
+ * With BQL support, we can increase TX ring safely.
+ * There are no ill effects from too-large receive rings.
+ */
+#define TX_RING_SIZE	64
+#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
 #define RX_RING_SIZE	64
 
 /* Operational parameters that usually are not changed. */
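Note: the comment rewritten above keeps both ring sizes powers of two so index wrapping stays a cheap bit mask, and points out that BQL makes a larger TX ring safe; TX_QUEUE_LEN is now derived from TX_RING_SIZE rather than being an independent constant, leaving a few descriptors of headroom for the stop/wake thresholds used later in the driver. A minimal user-space sketch of the modulo-to-mask equivalence the comment relies on (the helper name is illustrative, not from the driver):

	#include <assert.h>

	#define TX_RING_SIZE 64			/* power of two, as in the patch */

	/* For power-of-two sizes, x % TX_RING_SIZE equals x & (TX_RING_SIZE - 1),
	 * so the ring index wrap compiles down to a single AND. */
	static unsigned int ring_wrap(unsigned int x)
	{
		return x & (TX_RING_SIZE - 1);
	}

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < 4 * TX_RING_SIZE; i++)
			assert(ring_wrap(i) == i % TX_RING_SIZE);
		return 0;
	}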
@@ -1295,6 +1297,7 @@ static void alloc_tbufs(struct net_device* dev)
 	}
 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
 
+	netdev_reset_queue(dev);
 }
 
 static void free_tbufs(struct net_device* dev)
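Note: netdev_reset_queue() is the BQL hook from <linux/netdevice.h> that clears the per-queue byte and packet accounting; calling it from alloc_tbufs() keeps BQL in sync with a freshly initialised, empty TX ring. A sketch of that pattern for a generic driver (the helper name and the ring re-init step are placeholders, not via-rhine code):

	#include <linux/netdevice.h>

	/* Illustrative helper: after the TX descriptor ring is rebuilt and no
	 * skbs remain in flight, BQL's accounting must be cleared as well,
	 * otherwise the stack would still think bytes are queued to the NIC. */
	static void example_tx_ring_reinit(struct net_device *dev)
	{
		/* ... allocate/reset descriptors, drop any stale tx_skbuff[] ... */
		netdev_reset_queue(dev);	/* BQL state for TX queue 0 back to empty */
	}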
@@ -1795,6 +1798,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	else
 		rp->tx_ring[entry].tx_status = 0;
 
+	netdev_sent_queue(dev, skb->len);
 	/* lock eth irq */
 	wmb();
 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
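Note: netdev_sent_queue(dev, skb->len) charges the queued bytes to BQL on the transmit path; in the hunk above it sits before the wmb() and the DescOwn store, i.e. before the descriptor can be handed to the hardware. A generic sketch of that ordering (the function and the ring handling are hypothetical stand-ins):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Transmit-side half of BQL, schematically. */
	static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* ... map skb and fill the next TX descriptor ... */

		netdev_sent_queue(dev, skb->len);	/* account bytes handed to HW */

		wmb();		/* descriptor contents visible before ownership flips */
		/* ... set DescOwn and kick the transmitter ... */

		return NETDEV_TX_OK;
	}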
@@ -1863,6 +1867,8 @@ static void rhine_tx(struct net_device *dev)
 	struct rhine_private *rp = netdev_priv(dev);
 	struct device *hwdev = dev->dev.parent;
 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
+	struct sk_buff *skb;
 
 	/* find and cleanup dirty tx descriptors */
 	while (rp->dirty_tx != rp->cur_tx) {
@@ -1871,6 +1877,7 @@ static void rhine_tx(struct net_device *dev)
 			  entry, txstatus);
 		if (txstatus & DescOwn)
 			break;
+		skb = rp->tx_skbuff[entry];
 		if (txstatus & 0x8000) {
 			netif_dbg(rp, tx_done, dev,
 				  "Transmit error, Tx status %08x\n", txstatus);
@@ -1899,7 +1906,7 @@ static void rhine_tx(struct net_device *dev)
 				 (txstatus >> 3) & 0xF, txstatus & 0xF);
 
 			u64_stats_update_begin(&rp->tx_stats.syncp);
-			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
+			rp->tx_stats.bytes += skb->len;
 			rp->tx_stats.packets++;
 			u64_stats_update_end(&rp->tx_stats.syncp);
 		}
@@ -1907,13 +1914,17 @@ static void rhine_tx(struct net_device *dev)
 		if (rp->tx_skbuff_dma[entry]) {
 			dma_unmap_single(hwdev,
 					 rp->tx_skbuff_dma[entry],
-					 rp->tx_skbuff[entry]->len,
+					 skb->len,
 					 DMA_TO_DEVICE);
 		}
-		dev_consume_skb_any(rp->tx_skbuff[entry]);
+		bytes_compl += skb->len;
+		pkts_compl++;
+		dev_consume_skb_any(skb);
 		rp->tx_skbuff[entry] = NULL;
 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
 	}
+
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
 		netif_wake_queue(dev);
 }
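Note: on the completion side, the cleanup loop now tallies every reclaimed skb into pkts_compl/bytes_compl and reports the totals once per rhine_tx() run through netdev_completed_queue(); together with netdev_sent_queue() and netdev_reset_queue() this is the usual three-call BQL pattern, which is what makes the larger TX_RING_SIZE safe. A condensed sketch of the completion pattern, with a hypothetical ring structure standing in for struct rhine_private:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define EXAMPLE_RING_SIZE 64

	/* Hypothetical ring state, for illustration only. */
	struct example_ring {
		struct sk_buff *tx_skbuff[EXAMPLE_RING_SIZE];
		unsigned int dirty_tx;
		unsigned int cur_tx;
	};

	static void example_tx_complete(struct net_device *dev, struct example_ring *r)
	{
		unsigned int pkts_compl = 0, bytes_compl = 0;

		while (r->dirty_tx != r->cur_tx) {
			unsigned int entry = r->dirty_tx % EXAMPLE_RING_SIZE;
			struct sk_buff *skb = r->tx_skbuff[entry];

			/* ... stop here if the NIC still owns the descriptor ... */

			bytes_compl += skb->len;	/* accumulate, report once below */
			pkts_compl++;
			dev_consume_skb_any(skb);
			r->tx_skbuff[entry] = NULL;
			r->dirty_tx++;
		}

		/* One BQL update per completion run, matching the sent_queue charges. */
		netdev_completed_queue(dev, pkts_compl, bytes_compl);
	}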