Diffstat (limited to 'drivers/net/forcedeth.c')
 drivers/net/forcedeth.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7c0f0ccbbb29..ae6cf7827e57 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -210,7 +210,7 @@ enum {
  * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
  */
 	NvRegPollingInterval = 0x00c,
-#define NVREG_POLL_DEFAULT_THROUGHPUT	970
+#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
 #define NVREG_POLL_DEFAULT_CPU	13
 	NvRegMSIMap0 = 0x020,
 	NvRegMSIMap1 = 0x024,
@@ -1859,14 +1859,15 @@ static void nv_tx_done(struct net_device *dev)
 	}
 }
 
-static void nv_tx_done_optimized(struct net_device *dev)
+static void nv_tx_done_optimized(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
 
 	while ((np->get_tx.ex != np->put_tx.ex) &&
-	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID)) {
+	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
+	       (limit-- > 0)) {
 
 		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
 			dev->name, flags);
@@ -1973,7 +1974,7 @@ static void nv_tx_timeout(struct net_device *dev)
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 		nv_tx_done(dev);
 	else
-		nv_tx_done_optimized(dev);
+		nv_tx_done_optimized(dev, np->tx_ring_size);
 
 	/* 3) if there are dead entries: clear everything */
 	if (np->get_tx_ctx != np->put_tx_ctx) {
@@ -2899,7 +2900,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 			break;
 
 		spin_lock(&np->lock);
-		nv_tx_done_optimized(dev);
+		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
 		spin_unlock(&np->lock);
 
 #ifdef CONFIG_FORCEDETH_NAPI
@@ -3006,7 +3007,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 			break;
 
 		spin_lock_irqsave(&np->lock, flags);
-		nv_tx_done_optimized(dev);
+		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
 		spin_unlock_irqrestore(&np->lock, flags);
 
 		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
@@ -3163,6 +3164,11 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 		if (!(events & np->irqmask))
 			break;
 
+		/* check tx in case we reached max loop limit in tx isr */
+		spin_lock_irqsave(&np->lock, flags);
+		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+		spin_unlock_irqrestore(&np->lock, flags);
+
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock_irqsave(&np->lock, flags);
 			nv_link_irq(dev);
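
The core of the change is the bounded cleanup loop: nv_tx_done_optimized() now stops after "limit" descriptors, so the hot interrupt path only ever does TX_WORK_PER_LOOP worth of work, while the slower paths (nv_tx_timeout() with np->tx_ring_size, the polling interval, and the "other" interrupt handler, per the patch's own comments) act as a backup that drains whatever the fast path left behind. Below is a minimal standalone C sketch of that pattern; the ring layout, the indices, and the TX_WORK_PER_LOOP value are simplified stand-ins for illustration, not the driver's actual structures.

/*
 * Userspace sketch of a bounded descriptor-cleanup loop with a backup
 * drain path.  Everything here is a simplified model of the idea in the
 * patch, not forcedeth code.
 */
#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE        16
#define TX_WORK_PER_LOOP 4   /* assumed small per-interrupt budget */

struct desc {
	bool owned_by_hw;        /* models the NV_TX_VALID flag */
};

static struct desc ring[RING_SIZE];
static unsigned int get_idx, put_idx;   /* consumer / producer indices */

/* Reclaim at most 'limit' completed descriptors; return how many were freed. */
static int tx_done(int limit)
{
	int freed = 0;

	while (get_idx != put_idx &&
	       !ring[get_idx % RING_SIZE].owned_by_hw &&
	       limit-- > 0) {
		get_idx++;       /* descriptor done, give it back to the stack */
		freed++;
	}
	return freed;
}

int main(void)
{
	/* Pretend the hardware completed 10 packets. */
	put_idx = 10;

	/* Fast path: bounded amount of work per interrupt. */
	printf("irq path freed %d\n", tx_done(TX_WORK_PER_LOOP));

	/* Backup path: timeout/timer drains the rest with a ring-sized budget. */
	printf("backup path freed %d\n", tx_done(RING_SIZE));
	return 0;
}

The design point is the same as in the patch: capping the per-interrupt loop keeps interrupt latency predictable under heavy transmit load, and correctness is preserved because a lower-priority path is guaranteed to finish the cleanup later.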