author    Eric Dumazet <eric.dumazet@gmail.com>  2010-05-19 19:16:03 -0400
committer David S. Miller <davem@davemloft.net>  2010-05-31 03:24:01 -0400
commit    15e83ed78864d0625e87a85f09b297c0919a4797
tree      a138efc4400c0857a9728f7ebbfe4f5c4fb71ad1
parent    27f39c73e63833b4c081a0d681d88b4184a0491d
net: remove zap_completion_queue
netpoll did interesting work in zap_completion_queue(), but that was before we started orphaning skbs before delivering packets to the device. It now makes sense to add a test in dev_kfree_skb_irq() so that an already-orphaned skb is not queued, and to remove netpoll's zap_completion_queue() as a bonus.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
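The matching net/core/dev.c hunk is not part of this file-filtered view. As a rough sketch of the test the message describes (reconstructed from the commit message, not copied from the tree): skb_orphan() clears skb->destructor, so an orphaned skb has no socket accounting left to run and can be freed immediately, even from hard-irq context; only skbs that still carry a destructor need to be deferred to the per-CPU completion queue:

/* Sketch only -- assumes the 2.6.35-era dev_kfree_skb_irq() body.
 * An orphaned skb (skb->destructor == NULL) is freed on the spot;
 * everything else is chained onto softnet_data.completion_queue and
 * handed to NET_TX_SOFTIRQ, which runs destructors in softirq context.
 */
void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (!skb->destructor)
		dev_kfree_skb(skb);		/* orphaned: free right away */
	else if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}

With that test in place, nothing netpoll might have queued to the completion queue is left unorphaned, which is what makes dropping zap_completion_queue() below safe.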
Diffstat (limited to 'net/core/netpoll.c')
 net/core/netpoll.c | 31 -------------------------------
 1 file changed, 0 insertions(+), 31 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94825b109551..e034342c819c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -49,7 +49,6 @@ static atomic_t trapped;
 	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
 		sizeof(struct iphdr) + sizeof(struct ethhdr))
 
-static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
 static unsigned int carrier_timeout = 4;
@@ -197,7 +196,6 @@ void netpoll_poll_dev(struct net_device *dev)
 
 	service_arp_queue(dev->npinfo);
 
-	zap_completion_queue();
 }
 
 void netpoll_poll(struct netpoll *np)
@@ -221,40 +219,11 @@ static void refill_skbs(void)
 	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
-static void zap_completion_queue(void)
-{
-	unsigned long flags;
-	struct softnet_data *sd = &get_cpu_var(softnet_data);
-
-	if (sd->completion_queue) {
-		struct sk_buff *clist;
-
-		local_irq_save(flags);
-		clist = sd->completion_queue;
-		sd->completion_queue = NULL;
-		local_irq_restore(flags);
-
-		while (clist != NULL) {
-			struct sk_buff *skb = clist;
-			clist = clist->next;
-			if (skb->destructor) {
-				atomic_inc(&skb->users);
-				dev_kfree_skb_any(skb); /* put this one back */
-			} else {
-				__kfree_skb(skb);
-			}
-		}
-	}
-
-	put_cpu_var(softnet_data);
-}
-
 static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
 	int count = 0;
 	struct sk_buff *skb;
 
-	zap_completion_queue();
 	refill_skbs();
 repeat:
 