author    Changli Gao <xiaosuo@gmail.com>        2010-03-30 16:16:22 -0400
committer David S. Miller <davem@davemloft.net>  2010-04-01 21:41:40 -0400
commit    152102c7f2bf191690f1069bae292ea3925adf14
tree      093ce1204a81acd76623a567b821661c504bc575
parent    630b943c182d1aed69f244405131902fbcba7ec6
rps: keep the old behavior on SMP without rps
RPS introduces a lock operation on the per-cpu variable input_pkt_queue on
SMP, whether or not RPS is enabled. On SMP without RPS, this lock isn't
needed at all.
Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 net/core/dev.c | 42 ++++++++++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 14 deletions(-)
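The whole patch turns on one pattern: lock helpers that compile to empty
inline functions when CONFIG_RPS is unset, so a non-RPS SMP build gets back
the pre-RPS lock-free access to its per-cpu queue. Below is a minimal
standalone userspace sketch of that pattern, not kernel code: the struct
name carries a _sketch suffix because it is invented here, and a pthread
spinlock stands in for input_pkt_queue.lock.

/* Standalone userspace sketch of the conditional-lock pattern this patch
 * introduces; not kernel code.  A pthread spinlock stands in for
 * input_pkt_queue.lock.  Build: cc -O2 [-DCONFIG_RPS] sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

struct softnet_data_sketch {
	pthread_spinlock_t lock;	/* stand-in for input_pkt_queue.lock */
	int qlen;			/* stand-in for input_pkt_queue.qlen */
};

static inline void rps_lock(struct softnet_data_sketch *queue)
{
#ifdef CONFIG_RPS
	pthread_spin_lock(&queue->lock);	/* real lock only with RPS */
#endif
}

static inline void rps_unlock(struct softnet_data_sketch *queue)
{
#ifdef CONFIG_RPS
	pthread_spin_unlock(&queue->lock);
#endif
}

int main(void)
{
	struct softnet_data_sketch queue = { .qlen = 0 };

	pthread_spin_init(&queue.lock, PTHREAD_PROCESS_PRIVATE);
	rps_lock(&queue);		/* empty inline unless -DCONFIG_RPS */
	queue.qlen++;			/* "enqueue" under whichever protection */
	rps_unlock(&queue);
#ifdef CONFIG_RPS
	puts("locked path (CONFIG_RPS)");
#else
	puts("lock-free path (no CONFIG_RPS)");
#endif
	return 0;
}

Compiled without the define, rps_lock()/rps_unlock() are empty and the
optimizer drops them entirely; with it, the real lock operations reappear.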
diff --git a/net/core/dev.c b/net/core/dev.c
index 887aa84fcd46..427cd53c118d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -206,6 +206,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 }
 
+static inline void rps_lock(struct softnet_data *queue)
+{
+#ifdef CONFIG_RPS
+	spin_lock(&queue->input_pkt_queue.lock);
+#endif
+}
+
+static inline void rps_unlock(struct softnet_data *queue)
+{
+#ifdef CONFIG_RPS
+	spin_unlock(&queue->input_pkt_queue.lock);
+#endif
+}
+
 /* Device list insertion */
 static int list_netdevice(struct net_device *dev)
 {
@@ -2313,13 +2327,13 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
 	local_irq_save(flags);
 	__get_cpu_var(netdev_rx_stat).total++;
 
-	spin_lock(&queue->input_pkt_queue.lock);
+	rps_lock(queue);
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
-			spin_unlock_irqrestore(&queue->input_pkt_queue.lock,
-					       flags);
+			rps_unlock(queue);
+			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
 		}
 
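Note how the single spin_unlock_irqrestore() above becomes two calls:
rps_unlock() may compile away, but interrupts must be restored on every
configuration, so the IRQ half is made explicit. A small standalone sketch
of that split (all names carry a _sketch suffix; these are stubs, not the
kernel primitives):

/* Standalone sketch: the unlock half may vanish at compile time,
 * the IRQ-restore half must not.  Build: cc [-DCONFIG_RPS] split.c */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_disabled_sketch;

static void local_irq_save_sketch(void)    { irqs_disabled_sketch = true; }
static void local_irq_restore_sketch(void) { irqs_disabled_sketch = false; }

static void rps_unlock_sketch(void)
{
#ifdef CONFIG_RPS
	puts("spin_unlock(&queue->input_pkt_queue.lock)");	/* real unlock */
#endif	/* otherwise: nothing to unlock; IRQs-off protects the per-cpu queue */
}

int main(void)
{
	local_irq_save_sketch();
	/* ... __skb_queue_tail() would run here ... */
	rps_unlock_sketch();		/* no-op unless built with -DCONFIG_RPS */
	local_irq_restore_sketch();	/* runs unconditionally */
	printf("irqs re-enabled: %s\n", irqs_disabled_sketch ? "no" : "yes");
	return 0;
}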
@@ -2341,7 +2355,7 @@ enqueue:
 		goto enqueue;
 	}
 
-	spin_unlock(&queue->input_pkt_queue.lock);
+	rps_unlock(queue);
 
 	__get_cpu_var(netdev_rx_stat).dropped++;
 	local_irq_restore(flags);
@@ -2766,19 +2780,19 @@ int netif_receive_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending */
-static void flush_backlog(struct net_device *dev, int cpu)
+static void flush_backlog(void *arg)
 {
-	struct softnet_data *queue = &per_cpu(softnet_data, cpu);
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
 	struct sk_buff *skb, *tmp;
-	unsigned long flags;
 
-	spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
+	rps_lock(queue);
 	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &queue->input_pkt_queue);
 			kfree_skb(skb);
 		}
-	spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
+	rps_unlock(queue);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -3091,14 +3105,16 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	do {
 		struct sk_buff *skb;
 
-		spin_lock_irq(&queue->input_pkt_queue.lock);
+		local_irq_disable();
+		rps_lock(queue);
 		skb = __skb_dequeue(&queue->input_pkt_queue);
 		if (!skb) {
 			__napi_complete(napi);
 			spin_unlock_irq(&queue->input_pkt_queue.lock);
 			break;
 		}
-		spin_unlock_irq(&queue->input_pkt_queue.lock);
+		rps_unlock(queue);
+		local_irq_enable();
 
 		__netif_receive_skb(skb);
 	} while (++work < quota && jiffies == start_time);
@@ -5548,7 +5564,6 @@ void netdev_run_todo(void)
 	while (!list_empty(&list)) {
 		struct net_device *dev
 			= list_first_entry(&list, struct net_device, todo_list);
-		int i;
 		list_del(&dev->todo_list);
 
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5560,8 +5575,7 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		for_each_online_cpu(i)
-			flush_backlog(dev, i);
+		on_each_cpu(flush_backlog, dev, 1);
 
 		netdev_wait_allrefs(dev);
 
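The flush_backlog() conversion is the one structural change in the patch:
instead of a single CPU walking every other CPU's queue (which is what
required the irqsave spinlock), the function is recast as an on_each_cpu()
callback, so each CPU flushes its own softnet_data locally and rps_lock()
suffices. That is also why its signature changed to void *arg: cross-CPU
callbacks receive an opaque pointer. A userspace analogue of the dispatch
pattern (names with a _sketch suffix are invented; threads stand in for
CPUs, and pthread's void *-returning callback type differs from the
kernel's):

/* Userspace analogue of the on_each_cpu() conversion; not kernel code.
 * Build: cc dispatch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS_SKETCH 4		/* pretend we have four CPUs */

struct net_device_sketch { const char *name; };

/* flush_backlog() analogue: takes an opaque void *, as callbacks require */
static void *flush_backlog_sketch(void *arg)
{
	struct net_device_sketch *dev = arg;
	printf("flushing backlog for %s on this cpu\n", dev->name);
	return NULL;
}

/* on_each_cpu() analogue: run func once per "CPU", then wait (wait == 1) */
static void on_each_cpu_sketch(void *(*func)(void *), void *info)
{
	pthread_t tid[NR_CPUS_SKETCH];

	for (int i = 0; i < NR_CPUS_SKETCH; i++)
		pthread_create(&tid[i], NULL, func, info);
	for (int i = 0; i < NR_CPUS_SKETCH; i++)
		pthread_join(tid[i], NULL);	/* wait for all "CPUs" */
}

int main(void)
{
	struct net_device_sketch dev = { .name = "eth0" };

	on_each_cpu_sketch(flush_backlog_sketch, &dev);
	return 0;
}

In the kernel, on_each_cpu(func, info, 1) likewise runs func on every online
CPU and, with wait set, returns only once all of them have finished, which
is what lets netdev_run_todo() proceed straight to netdev_wait_allrefs(dev).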
