author		Stephen Hemminger <shemminger@osdl.org>	2006-04-25 13:58:50 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-04-26 06:19:45 -0400
commit		734cbc363b159caee158d5a83408c72d98bcacf0 (patch)
tree		14d903eaf2b7580f791af9fd0d2800f1eb91723f /drivers
parent		3b908870b8332dfd40be0e919e187aa4991536fb (diff)
[PATCH] sky2: reschedule if irq still pending
This is a workaround for the case of edge-triggered IRQs. Several users seem to have broken configurations sharing edge-triggered IRQs. To avoid losing IRQs, reschedule if more work arrives.

The changes to netdevice.h extract the part that puts the device back in the poll list into a separate inline.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/sky2.c	20
1 file changed, 16 insertions, 4 deletions
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 67b0eab16589..618fde8622ca 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2093,6 +2093,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	int work_done = 0;
 	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
 
+restart_poll:
 	if (unlikely(status & ~Y2_IS_STAT_BMU)) {
 		if (status & Y2_IS_HW_ERR)
 			sky2_hw_intr(hw);
@@ -2123,7 +2124,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	}
 
 	if (status & Y2_IS_STAT_BMU) {
-		work_done = sky2_status_intr(hw, work_limit);
+		work_done += sky2_status_intr(hw, work_limit - work_done);
 		*budget -= work_done;
 		dev0->quota -= work_done;
 
@@ -2133,9 +2134,22 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
 	}
 
-	netif_rx_complete(dev0);
+	local_irq_disable();
+	__netif_rx_complete(dev0);
 
 	status = sky2_read32(hw, B0_Y2_SP_LISR);
+
+	if (unlikely(status)) {
+		/* More work pending, try and keep going */
+		if (__netif_rx_schedule_prep(dev0)) {
+			__netif_rx_reschedule(dev0, work_done);
+			status = sky2_read32(hw, B0_Y2_SP_EISR);
+			local_irq_enable();
+			goto restart_poll;
+		}
+	}
+
+	local_irq_enable();
 	return 0;
 }
 
@@ -2153,8 +2167,6 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
 	prefetch(&hw->st_le[hw->st_idx]);
 	if (likely(__netif_rx_schedule_prep(dev0)))
 		__netif_rx_schedule(dev0);
-	else
-		printk(KERN_DEBUG PFX "irq race detected\n");
 
 	return IRQ_HANDLED;
 }
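
Note: the include/linux/netdevice.h half of this commit is not visible above because the diffstat is limited to 'drivers'. As a rough sketch only, the extracted __netif_rx_reschedule() inline called from sky2_poll() presumably does little more than the "put the device back in the poll list" bookkeeping; the body below is an assumption modelled on the existing netif_rx_reschedule()/__netif_rx_schedule() helpers of that era, not the literal hunk from this commit.

/*
 * Sketch (assumed, not the actual netdevice.h hunk): the caller has
 * already disabled local IRQs and claimed the poll bit with
 * __netif_rx_schedule_prep(), exactly as sky2_poll() does above.
 */
static inline void __netif_rx_reschedule(struct net_device *dev, int undo)
{
	/* return the quota consumed by the work already counted */
	dev->quota += undo;

	/* put the device back on this CPU's poll list ... */
	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);

	/* ... and make sure NET_RX_SOFTIRQ runs again */
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

With the IRQ-off and prep steps left to the caller, sky2_poll() can re-read B0_Y2_SP_EISR and jump back to restart_poll without leaving the softirq, which is what keeps a still-pending edge-triggered interrupt from being lost.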