author    Jarek Poplawski <jarkao2@o2.pl>        2007-07-05 20:42:44 -0400
committer David S. Miller <davem@davemloft.net>  2007-07-05 20:42:44 -0400
commit    25442cafb8cc3d979418caccabc91260707a0947
tree      3f934e686d37619a211946f5d78dd28fad82e61b /net/core
parent    94b83419e5b56a87410fd9c9939f0081fc155d65
[NETPOLL]: Fixups for 'fix soft lockup when removing module'
From my recent patch:
> > #1
> > Up to and including kernel 2.6.21, cancel_rearming_delayed_work()
> > required that a work function always (unconditionally) rearm itself
> > with a delay > 0 - otherwise it would loop endlessly. This patch
> > replaces that function with cancel_delayed_work(). Later kernel
> > versions don't require this, so here it's only for uniformity.
But Oleg Nesterov <oleg@tv-sign.ru> found:
> But 2.6.22 doesn't need this change, so why was it merged?
>
> In fact, I suspect this change adds a race,
...
His description was right (thanks), so this patch reverts #1.
Signed-off-by: Jarek Poplawski <jarkao2@o2.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
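[Editor's note] For context on the workqueue pattern the message refers to, here is a minimal, hypothetical sketch in 2.6.22-era kernel C. The names example_work, example_work_fn and example_cleanup are invented and are not part of netpoll or of this patch; the sketch only illustrates a self-rearming delayed work and the cancellation contract that this revert restores (later kernels provide cancel_delayed_work_sync() for the same purpose).

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_work, example_work_fn);

static void example_work_fn(struct work_struct *work)
{
	/* ... try to drain a queue; may have to give up and retry ... */

	/*
	 * Unconditional rearm, as queue_process() does again after this
	 * revert.  The pre-2.6.22 cancel_rearming_delayed_work() loops on
	 * cancel_delayed_work()/flush and only makes progress if the
	 * handler keeps requeueing itself with a delay > 0.
	 */
	schedule_delayed_work(&example_work, HZ / 10);
}

static void example_cleanup(void)
{
	/*
	 * cancel_delayed_work() only removes a timer that has not fired
	 * yet; it does not wait for a handler that is already running.
	 * Such a handler can rearm the work after both the cancel and a
	 * subsequent flush_scheduled_work(), leaving a delayed work
	 * pending against an object about to be freed - the sort of race
	 * referred to above.  cancel_rearming_delayed_work() keeps
	 * cancelling and flushing until the work is really gone.
	 */
	cancel_rearming_delayed_work(&example_work);
}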
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/netpoll.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index cf40ff91ac01..a0efdd7a6b37 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -72,8 +72,7 @@ static void queue_process(struct work_struct *work)
 			netif_tx_unlock(dev);
 			local_irq_restore(flags);
 
-			if (atomic_read(&npinfo->refcnt))
-				schedule_delayed_work(&npinfo->tx_work, HZ/10);
+			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
 		netif_tx_unlock(dev);
@@ -786,8 +785,7 @@ void netpoll_cleanup(struct netpoll *np)
 		if (atomic_dec_and_test(&npinfo->refcnt)) {
 			skb_queue_purge(&npinfo->arp_tx);
 			skb_queue_purge(&npinfo->txq);
-			cancel_delayed_work(&npinfo->tx_work);
-			flush_scheduled_work();
+			cancel_rearming_delayed_work(&npinfo->tx_work);
 
 			/* clean after last, unfinished work */
 			if (!skb_queue_empty(&npinfo->txq)) {