-rw-r--r--	net/core/dev.c	4
-rw-r--r--	net/core/netpoll.c	31
2 files changed, 32 insertions, 3 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 8c663dbf1d77..e1c1cdcc2bb0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1591,9 +1591,7 @@ EXPORT_SYMBOL(__netif_schedule);
 
 void dev_kfree_skb_irq(struct sk_buff *skb)
 {
-	if (!skb->destructor)
-		dev_kfree_skb(skb);
-	else if (atomic_dec_and_test(&skb->users)) {
+	if (atomic_dec_and_test(&skb->users)) {
 		struct softnet_data *sd;
 		unsigned long flags;
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c2b7a8bed8f6..537e01afd81b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -49,6 +49,7 @@ static atomic_t trapped;
 	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
 	 sizeof(struct iphdr) + sizeof(struct ethhdr))
 
+static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
 static unsigned int carrier_timeout = 4;
@@ -196,6 +197,7 @@ void netpoll_poll_dev(struct net_device *dev)
 
 	service_arp_queue(dev->npinfo);
 
+	zap_completion_queue();
 }
 EXPORT_SYMBOL(netpoll_poll_dev);
 
@@ -221,11 +223,40 @@ static void refill_skbs(void)
 	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
+static void zap_completion_queue(void)
+{
+	unsigned long flags;
+	struct softnet_data *sd = &get_cpu_var(softnet_data);
+
+	if (sd->completion_queue) {
+		struct sk_buff *clist;
+
+		local_irq_save(flags);
+		clist = sd->completion_queue;
+		sd->completion_queue = NULL;
+		local_irq_restore(flags);
+
+		while (clist != NULL) {
+			struct sk_buff *skb = clist;
+			clist = clist->next;
+			if (skb->destructor) {
+				atomic_inc(&skb->users);
+				dev_kfree_skb_any(skb); /* put this one back */
+			} else {
+				__kfree_skb(skb);
+			}
+		}
+	}
+
+	put_cpu_var(softnet_data);
+}
+
 static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
 	int count = 0;
 	struct sk_buff *skb;
 
+	zap_completion_queue();
 	refill_skbs();
 repeat:
 
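
What the patch does, in short: dev_kfree_skb_irq() now always drops a reference on the skb and, on the last put, defers the actual free to the per-CPU softnet completion_queue; netpoll drains that queue itself via the new zap_completion_queue(), both after polling a device in netpoll_poll_dev() and before allocating from its pool in find_skb(). Skbs that still carry a destructor get their reference bumped and are handed back through dev_kfree_skb_any() (the "put this one back" branch), presumably because netpoll's context is not a safe place to run arbitrary destructors; skbs without one are freed on the spot with __kfree_skb(). The stand-alone user-space C sketch below models only that reference hand-off under those assumptions; every name in it (struct node, queue_free, drain_queue) is hypothetical, and it is an illustration, not kernel code.

/*
 * Hypothetical user-space model of the completion-queue hand-off in the
 * patch above.  Only the refcounting pattern mirrors the kernel code;
 * all names and types here are made up for illustration.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	atomic_int users;			/* models skb->users */
	void (*destructor)(struct node *);	/* models skb->destructor */
};

static struct node *completion_queue;		/* models sd->completion_queue */

/* Models dev_kfree_skb_irq() after the patch: drop one reference and,
 * if it was the last one, defer the real free by queueing the node. */
static void queue_free(struct node *n)
{
	if (atomic_fetch_sub(&n->users, 1) == 1) {
		n->next = completion_queue;
		completion_queue = n;
	}
}

/* Models zap_completion_queue(): detach the queue and walk it.  Nodes
 * that still need a destructor run get an extra reference and go back
 * through the deferred-free path; plain nodes are freed right here. */
static void drain_queue(void)
{
	struct node *clist = completion_queue;

	completion_queue = NULL;
	while (clist) {
		struct node *n = clist;

		clist = clist->next;
		if (n->destructor) {
			atomic_fetch_add(&n->users, 1);
			queue_free(n);		/* put this one back */
		} else {
			printf("freeing node %p directly\n", (void *)n);
			free(n);
		}
	}
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;
	atomic_store(&n->users, 1);
	queue_free(n);		/* last put: node lands on the queue */
	drain_queue();		/* no destructor, so it is freed here */
	return 0;
}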