 drivers/net/forcedeth.c | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index f28ae12d8569..75906ade76f5 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -518,12 +518,6 @@ union ring_type {
 #define TX_RING_MIN 64
 #define RING_MAX_DESC_VER_1 1024
 #define RING_MAX_DESC_VER_2_3 16384
-/*
- * Difference between the get and put pointers for the tx ring.
- * This is used to throttle the amount of data outstanding in the
- * tx ring.
- */
-#define TX_LIMIT_DIFFERENCE 1
 
 /* rx/tx mac addr + type + vlan + align + slack*/
 #define NV_RX_HEADERS (64)
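
Note: the fixed reserve between the get and put pointers (TX_LIMIT_DIFFERENCE) goes away; flow control moves to an explicit tx_stop latch that the xmit paths set and the completion handlers clear. Condensed, the stop test changes as below (a sketch reusing the patch's own names, not lines from the file):

	/* before: keep tx_limit_stop descriptors in reserve */
	if ((empty_slots - np->tx_limit_stop) <= entries)
		netif_stop_queue(dev);

	/* after: stop only when this packet's descriptors do not fit */
	if (empty_slots <= entries)
		netif_stop_queue(dev);	/* and latch np->tx_stop = 1 */
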
@@ -777,8 +771,7 @@ struct fe_priv {
 	union ring_type tx_ring;
 	u32 tx_flags;
 	int tx_ring_size;
-	int tx_limit_start;
-	int tx_limit_stop;
+	int tx_stop;
 
 	/* vlan fields */
 	struct vlan_group *vlangrp;
@@ -1583,9 +1576,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	empty_slots = nv_get_empty_tx_slots(np);
-	if ((empty_slots - np->tx_limit_stop) <= entries) {
+	if (empty_slots <= entries) {
 		spin_lock_irq(&np->lock);
 		netif_stop_queue(dev);
+		np->tx_stop = 1;
 		spin_unlock_irq(&np->lock);
 		return NETDEV_TX_BUSY;
 	}
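
Both nv_start_xmit() and nv_start_xmit_optimized() (next hunk) gain the same change. Setting np->tx_stop inside the same np->lock critical section as netif_stop_queue() is what keeps the latch coherent: the completion handlers run under the same lock, so they can never observe the queue stopped without the latch set. A minimal sketch of the producer side, assuming the surrounding forcedeth context (np, entries, nv_get_empty_tx_slots()):

	empty_slots = nv_get_empty_tx_slots(np);
	if (empty_slots <= entries) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);	/* no room: pause the stack's tx path */
		np->tx_stop = 1;	/* record that we stopped for lack of slots */
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;	/* the stack will requeue this skb */
	}
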
@@ -1704,9 +1698,10 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	empty_slots = nv_get_empty_tx_slots(np);
-	if ((empty_slots - np->tx_limit_stop) <= entries) {
+	if (empty_slots <= entries) {
 		spin_lock_irq(&np->lock);
 		netif_stop_queue(dev);
+		np->tx_stop = 1;
 		spin_unlock_irq(&np->lock);
 		return NETDEV_TX_BUSY;
 	}
@@ -1813,6 +1808,7 @@ static void nv_tx_done(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	struct sk_buff *skb;
+	struct ring_desc* orig_get_tx = np->get_tx.orig;
 
 	while (np->get_tx.orig != np->put_tx.orig) {
 		flags = le32_to_cpu(np->get_tx.orig->flaglen);
@@ -1858,8 +1854,10 @@ static void nv_tx_done(struct net_device *dev)
 		if (np->get_tx_ctx++ == np->last_tx_ctx)
 			np->get_tx_ctx = np->first_tx_ctx;
 	}
-	if (nv_get_empty_tx_slots(np) > np->tx_limit_start)
+	if ((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx)) {
+		np->tx_stop = 0;
 		netif_wake_queue(dev);
+	}
 }
 
 static void nv_tx_done_optimized(struct net_device *dev)
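
The wake side inverts the latch: nv_tx_done() snapshots the get pointer before reclaiming, and wakes the queue only if tx_stop was set and at least one descriptor was actually completed, which avoids spurious wakeups when nothing was freed. In outline (comments added; the logic is the patch's):

	struct ring_desc* orig_get_tx = np->get_tx.orig;	/* snapshot before reclaim */

	/* ... walk completed descriptors, advancing np->get_tx.orig ... */

	if ((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx)) {
		np->tx_stop = 0;	/* clear the latch ... */
		netif_wake_queue(dev);	/* ... and let the stack transmit again */
	}

nv_tx_done_optimized() below mirrors this with the ring_desc_ex descriptor type.
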
@@ -1867,6 +1865,7 @@ static void nv_tx_done_optimized(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	struct sk_buff *skb;
+	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
 
 	while (np->get_tx.ex != np->put_tx.ex) {
 		flags = le32_to_cpu(np->get_tx.ex->flaglen);
@@ -1895,8 +1894,10 @@ static void nv_tx_done_optimized(struct net_device *dev)
 		if (np->get_tx_ctx++ == np->last_tx_ctx)
 			np->get_tx_ctx = np->first_tx_ctx;
 	}
-	if (nv_get_empty_tx_slots(np) > np->tx_limit_start)
+	if ((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx)) {
+		np->tx_stop = 0;
 		netif_wake_queue(dev);
+	}
 }
 
 /*
@@ -4001,8 +4002,6 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	/* set new values */
 	np->rx_ring_size = ring->rx_pending;
 	np->tx_ring_size = ring->tx_pending;
-	np->tx_limit_stop = TX_LIMIT_DIFFERENCE;
-	np->tx_limit_start = TX_LIMIT_DIFFERENCE;
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -4967,8 +4966,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
 	np->rx_ring_size = RX_RING_DEFAULT;
 	np->tx_ring_size = TX_RING_DEFAULT;
-	np->tx_limit_stop = TX_LIMIT_DIFFERENCE;
-	np->tx_limit_start = TX_LIMIT_DIFFERENCE;
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
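
With the reserve gone, neither nv_set_ringparam() nor nv_probe() has anything to seed: the private data from alloc_etherdev() is zeroed, so tx_stop starts out clear. For readers outside the kernel tree, the whole protocol reduces to a stop/wake latch over a ring's free count. A toy, single-threaded userspace model (all names here are invented for illustration, not driver code):

	#include <stdio.h>

	#define RING_SIZE 8

	struct ring {
		int get, put;	/* consumer / producer indices */
		int stopped;	/* tx_stop analogue */
	};

	static int free_slots(const struct ring *r)
	{
		return RING_SIZE - 1 - ((r->put - r->get) & (RING_SIZE - 1));
	}

	/* producer: queue one packet needing n slots, or latch the stop flag */
	static int xmit(struct ring *r, int n)
	{
		if (free_slots(r) <= n) {
			r->stopped = 1;		/* np->tx_stop = 1 analogue */
			return -1;		/* NETDEV_TX_BUSY analogue */
		}
		r->put = (r->put + n) & (RING_SIZE - 1);
		return 0;
	}

	/* consumer: reclaim up to n slots; wake only if stopped and progress was made */
	static void tx_done(struct ring *r, int n)
	{
		int orig_get = r->get;

		while (n-- && r->get != r->put)
			r->get = (r->get + 1) & (RING_SIZE - 1);
		if (r->stopped && r->get != orig_get) {
			r->stopped = 0;		/* netif_wake_queue() analogue */
			printf("queue woken, free slots: %d\n", free_slots(r));
		}
	}

	int main(void)
	{
		struct ring r = { 0, 0, 0 };

		while (xmit(&r, 2) == 0)	/* fill until the ring stops us */
			;
		printf("stopped, free slots: %d\n", free_slots(&r));
		tx_done(&r, 4);			/* completions arrive: queue wakes */
		return 0;
	}
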