author     David S. Miller <davem@davemloft.net>  2010-08-03 03:24:04 -0400
committer  David S. Miller <davem@davemloft.net>  2010-08-03 03:24:04 -0400
commit     3578b0c8abc7bdb4f02152ce5db7e09d484c6866 (patch)
tree       a884f8467e5700b798d52a038d56f99ee5bd73f1 /net
parent     a427615e0420f179eab801b929111abaadea2ed3 (diff)
Revert "net: remove zap_completion_queue"
This reverts commit 15e83ed78864d0625e87a85f09b297c0919a4797.
As explained by Johannes Berg, the optimization made here is
invalid. Or, at best, incomplete.
Not only destructor invocation, but also conntrack entry release,
must be executed outside of hw IRQ context.
So just checking "skb->destructor" is insufficient.
Signed-off-by: David S. Miller <davem@davemloft.net>
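For context on why the destructor check alone is not enough: freeing an skb also drops any conntrack reference it holds, and that happens regardless of whether skb->destructor is set. A rough sketch of the relevant part of skb_release_head_state() from kernels of this era (paraphrased from memory, not part of this commit) illustrates the point:

```c
/* Sketch, not the exact source: both steps below must run outside
 * hard-IRQ context, and the conntrack put happens even when no
 * destructor is set -- which is why "if (!skb->destructor)" is not
 * a safe test for "can be freed from hard IRQ". */
static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);

	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);		/* conntrack entry release */
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
}
```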
Diffstat (limited to 'net')
-rw-r--r-- | net/core/dev.c     |  4
-rw-r--r-- | net/core/netpoll.c | 31
2 files changed, 32 insertions, 3 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 8c663dbf1d77..e1c1cdcc2bb0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1591,9 +1591,7 @@ EXPORT_SYMBOL(__netif_schedule);
 
 void dev_kfree_skb_irq(struct sk_buff *skb)
 {
-	if (!skb->destructor)
-		dev_kfree_skb(skb);
-	else if (atomic_dec_and_test(&skb->users)) {
+	if (atomic_dec_and_test(&skb->users)) {
 		struct softnet_data *sd;
 		unsigned long flags;
 
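The hunk above only shows the head of dev_kfree_skb_irq(); the body it falls into (unchanged by this commit) defers the actual free to softirq context rather than freeing in place. Roughly, and from memory of kernels around this period rather than from this diff, the restored function looks like:

```c
void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		/* Queue the skb on the per-CPU completion queue and let
		 * net_tx_action() (NET_TX_SOFTIRQ) do the real kfree_skb(),
		 * so destructors and conntrack puts run in softirq context. */
		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
```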
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c2b7a8bed8f6..537e01afd81b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -49,6 +49,7 @@ static atomic_t trapped;
 	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
 	 sizeof(struct iphdr) + sizeof(struct ethhdr))
 
+static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
 static unsigned int carrier_timeout = 4;
@@ -196,6 +197,7 @@ void netpoll_poll_dev(struct net_device *dev)
 
 	service_arp_queue(dev->npinfo);
 
+	zap_completion_queue();
 }
 EXPORT_SYMBOL(netpoll_poll_dev);
 
@@ -221,11 +223,40 @@ static void refill_skbs(void)
 	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
+static void zap_completion_queue(void)
+{
+	unsigned long flags;
+	struct softnet_data *sd = &get_cpu_var(softnet_data);
+
+	if (sd->completion_queue) {
+		struct sk_buff *clist;
+
+		local_irq_save(flags);
+		clist = sd->completion_queue;
+		sd->completion_queue = NULL;
+		local_irq_restore(flags);
+
+		while (clist != NULL) {
+			struct sk_buff *skb = clist;
+			clist = clist->next;
+			if (skb->destructor) {
+				atomic_inc(&skb->users);
+				dev_kfree_skb_any(skb); /* put this one back */
+			} else {
+				__kfree_skb(skb);
+			}
+		}
+	}
+
+	put_cpu_var(softnet_data);
+}
+
 static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
 	int count = 0;
 	struct sk_buff *skb;
 
+	zap_completion_queue();
 	refill_skbs();
 repeat:
 
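As a usage note: drivers that complete TX buffers from a hard-IRQ handler are expected to go through dev_kfree_skb_irq() (or dev_kfree_skb_any()) rather than kfree_skb(), so the deferral shown above is what actually runs. A minimal, hypothetical driver fragment (foo_tx_interrupt and struct foo_priv are made-up names, not from this commit):

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
	struct sk_buff *tx_skb;		/* hypothetical in-flight TX buffer */
};

/* Hypothetical TX-completion handler running in hard-IRQ context. */
static irqreturn_t foo_tx_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	struct sk_buff *skb = priv->tx_skb;

	priv->tx_skb = NULL;
	/* Defer the free: the skb lands on the per-CPU completion queue
	 * (or, under netpoll, is drained by zap_completion_queue()). */
	dev_kfree_skb_irq(skb);
	return IRQ_HANDLED;
}
```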