author     Stephen Hemminger <shemminger@osdl.org>    2006-04-25 13:58:50 -0400
committer  Jeff Garzik <jeff@garzik.org>              2006-04-26 06:19:45 -0400
commit     734cbc363b159caee158d5a83408c72d98bcacf0 (patch)
tree       14d903eaf2b7580f791af9fd0d2800f1eb91723f /include/linux/netdevice.h
parent     3b908870b8332dfd40be0e919e187aa4991536fb (diff)
[PATCH] sky2: reschedule if irq still pending
This is a workaround for the case of edge-triggered IRQs. Several users
seem to have broken configurations sharing edge-triggered IRQs. To avoid
losing interrupts, reschedule if more work arrives.

The changes to netdevice.h extract the part that puts the device back on
the poll list into a separate inline function.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
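To illustrate the intended use, here is a minimal sketch (not taken from this commit) of how a 2.6.16-era dev->poll() handler could apply netif_rx_reschedule() when a shared edge-triggered IRQ may have fired again while polling was being completed; my_poll(), my_rx_process() and my_irq_pending() are hypothetical placeholders for a driver's own routines:

static int my_poll(struct net_device *dev, int *budget)
{
	int work_limit = min(*budget, dev->quota);
	int work_done = my_rx_process(dev, work_limit);	/* drain RX ring (assumed helper) */

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_limit) {
		netif_rx_complete(dev);			/* done for now: leave poll list */

		/* With an edge-triggered IRQ the hardware may have raised new
		 * work without generating a fresh interrupt; requeue the device
		 * instead of silently losing it. */
		if (my_irq_pending(dev) && netif_rx_reschedule(dev, work_done))
			return 1;			/* stay on the poll list */
		return 0;
	}
	return 1;					/* quota exhausted, poll again */
}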
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--   include/linux/netdevice.h   18
1 files changed, 10 insertions, 8 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 40ccf8cc4239..01db7b88a2b1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -829,19 +829,21 @@ static inline void netif_rx_schedule(struct net_device *dev)
 	__netif_rx_schedule(dev);
 }
 
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
- * Do not inline this?
- */
+
+static inline void __netif_rx_reschedule(struct net_device *dev, int undo)
+{
+	dev->quota += undo;
+	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+}
+
+/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
 static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 {
 	if (netif_rx_schedule_prep(dev)) {
 		unsigned long flags;
-
-		dev->quota += undo;
-
 		local_irq_save(flags);
-		list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+		__netif_rx_reschedule(dev, undo);
 		local_irq_restore(flags);
 		return 1;
 	}
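Splitting out __netif_rx_reschedule() presumably lets a caller that is already running with interrupts disabled, for example a driver's hard interrupt handler that wants to push the device straight back onto the poll list, requeue the device without the redundant local_irq_save()/local_irq_restore() done by netif_rx_reschedule(). A minimal sketch under that assumption (the surrounding handler is hypothetical, not part of this patch):

	/* interrupts are already disabled in this context */
	if (netif_rx_schedule_prep(dev))
		__netif_rx_reschedule(dev, 0);	/* nothing to credit back to the quota */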