author		Stephen Hemminger <shemminger@vyatta.com>	2008-08-04 00:29:57 -0400
committer	David S. Miller <davem@davemloft.net>		2008-08-04 00:29:57 -0400
commit		6e583ce5242f32e925dcb198f7123256d0798370 (patch)
tree		9bf826ddc1c2826015a6d59141f7c53e094b0204 /net/core
parent		283d07ac201ee9f8aa6dc6f7519436b48760baff (diff)
net: eliminate refcounting in backlog queue
Avoid the overhead of atomic increment/decrement on each received packet.
This helps performance of non-NAPI devices (like loopback).
Use a cleanup function to walk the queue on each CPU and clean out any
leftover packets.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
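For background: dev_hold() and dev_put() boil down to an atomic increment and decrement of the device reference count, so the pattern removed here paid two atomic operations for every packet that passed through the per-CPU backlog. Below is a rough userspace sketch of the old versus new per-packet cost, with simplified stand-ins for the device and the queue helpers (none of this is kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for struct net_device: only the reference count matters here. */
struct fake_dev {
	atomic_int refcnt;
};

/* Old scheme: every enqueue pins the device, every delivery unpins it. */
static void old_enqueue(struct fake_dev *dev)
{
	atomic_fetch_add(&dev->refcnt, 1);	/* models dev_hold(skb->dev) */
	/* ... queueing of the packet would go here ... */
}

static void old_deliver(struct fake_dev *dev)
{
	/* ... delivery of the packet would go here ... */
	atomic_fetch_sub(&dev->refcnt, 1);	/* models dev_put(dev) */
}

/* New scheme: no per-packet atomics at all; leftover packets are reaped
 * once, when the device is unregistered (see flush_backlog in the diff). */
static void new_enqueue(void) { /* just queue the packet */ }
static void new_deliver(void) { /* just deliver the packet */ }

int main(void)
{
	struct fake_dev dev = { .refcnt = 1 };

	for (int i = 0; i < 1000000; i++) {	/* two atomic ops per packet */
		old_enqueue(&dev);
		old_deliver(&dev);
	}
	for (int i = 0; i < 1000000; i++) {	/* none per packet */
		new_enqueue();
		new_deliver();
	}
	printf("refcnt ends at %d\n", atomic_load(&dev.refcnt));
	return 0;
}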
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index cbf80098980c..fc6c9881eca8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1909,7 +1909,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
@@ -2270,6 +2269,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
@@ -2279,7 +2292,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2288,14 +2300,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -4169,6 +4176,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
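What takes over from the per-packet reference is a one-time sweep at teardown: on_each_cpu(flush_backlog, dev, 1) runs flush_backlog() on every CPU and, with the final argument set to 1, waits for all of them to finish before netdev_wait_allrefs() is entered, so no per-CPU backlog can still hold an skb that points at the dying device. A toy userspace model of that sweep follows: an array of lists stands in for per-CPU softnet_data, a plain loop stands in for on_each_cpu, and the unlink-while-walking mirrors the job done by skb_queue_walk_safe() plus __skb_unlink() and kfree_skb(). None of the names below are kernel APIs:

#include <stdio.h>
#include <stdlib.h>

#define NR_FAKE_CPUS 4

/* Toy packet: just a tag saying which device it belongs to. */
struct pkt {
	int dev_id;
	struct pkt *next;
};

/* One backlog list per "CPU", standing in for softnet_data.input_pkt_queue. */
static struct pkt *backlog[NR_FAKE_CPUS];

/* Drop every queued packet that belongs to the device being torn down,
 * unlinking safely while walking the list. */
static void flush_one_backlog(int cpu, int dead_dev)
{
	struct pkt **link = &backlog[cpu];

	while (*link) {
		struct pkt *p = *link;
		if (p->dev_id == dead_dev) {
			*link = p->next;	/* unlink from the queue */
			free(p);		/* free the packet */
		} else {
			link = &p->next;
		}
	}
}

/* Stand-in for on_each_cpu(flush_backlog, dev, 1): run the flush for every
 * CPU and return only once all of them are done. */
static void flush_all_backlogs(int dead_dev)
{
	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		flush_one_backlog(cpu, dead_dev);
}

int main(void)
{
	/* Queue one packet for device 1 and one for device 2 on each CPU. */
	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
		for (int dev = 1; dev <= 2; dev++) {
			struct pkt *p = malloc(sizeof(*p));
			p->dev_id = dev;
			p->next = backlog[cpu];
			backlog[cpu] = p;
		}
	}

	flush_all_backlogs(1);	/* device 1 is going away */

	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		for (struct pkt *p = backlog[cpu]; p; p = p->next)
			printf("cpu %d still holds a packet for dev %d\n",
			       cpu, p->dev_id);
	return 0;
}

In the kernel, each invocation of flush_backlog() only touches the queue of the CPU it runs on (note the __get_cpu_var(softnet_data) in the patch); the sequential loop above simply flattens that into a single thread.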