diff options
author:    Tom Herbert <therbert@google.com>  2010-03-23 09:39:19 -0400
committer: David S. Miller <davem@davemloft.net>  2010-03-24 02:17:18 -0400
commit:    e51d739ab79110c43ca03daf3ddb3c52dadd38b7 (patch)
tree:      b15bdd3cb58054cf052d821277408086a1cd7d0e
parent:    ec43b1a64a132303a6800c781bc17c683aedc55b (diff)
net: Fix locking in flush_backlog
Need to take spinlocks when dequeuing from input_pkt_queue in flush_backlog.
Also, flush_backlog can now be called directly from netdev_run_todo.
Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 net/core/dev.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index a03aab45e84f..5e3dc28cbf5a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2766,17 +2766,19 @@ int netif_receive_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending */
-static void flush_backlog(void *arg)
+static void flush_backlog(struct net_device *dev, int cpu)
 {
-	struct net_device *dev = arg;
-	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct softnet_data *queue = &per_cpu(softnet_data, cpu);
 	struct sk_buff *skb, *tmp;
+	unsigned long flags;
 
+	spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
 	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &queue->input_pkt_queue);
 			kfree_skb(skb);
 		}
+	spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -5545,6 +5547,7 @@ void netdev_run_todo(void)
 	while (!list_empty(&list)) {
 		struct net_device *dev
 			= list_first_entry(&list, struct net_device, todo_list);
+		int i;
 		list_del(&dev->todo_list);
 
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5556,7 +5559,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		on_each_cpu(flush_backlog, dev, 1);
+		for_each_online_cpu(i)
+			flush_backlog(dev, i);
 
 		netdev_wait_allrefs(dev);
 