author	Linus Torvalds <torvalds@g5.osdl.org>	2005-08-12 12:22:34 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-08-12 12:22:34 -0400
commit	63788ea9245688772d13b979bea05bd72b239aad (patch)
tree	13f00cc8f6da1ed6c583947652bbedb2a0e27693
parent	349188f66da2fd88f8cb2407763051d8e136c9aa (diff)
parent	d7b9dfc8ea43936e6e8eec3040dcf4f110563868 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
-rw-r--r--  drivers/net/e1000/e1000_main.c   1
-rw-r--r--  include/linux/netpoll.h         20
-rw-r--r--  include/linux/skbuff.h           2
-rw-r--r--  net/core/dev.c                   9
-rw-r--r--  net/core/netpoll.c              63
5 files changed, 59 insertions, 36 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e5d2c3c7ce4..b82fd15d0891 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3789,6 +3789,7 @@ e1000_netpoll(struct net_device *netdev)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
+	e1000_clean_tx_irq(adapter);
 	enable_irq(adapter->pdev->irq);
 }
 #endif
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index bcd0ac33f592..5ade54a78dbb 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -9,6 +9,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
+#include <linux/rcupdate.h>
 #include <linux/list.h>
 
 struct netpoll;
@@ -26,6 +27,7 @@ struct netpoll {
 struct netpoll_info {
 	spinlock_t poll_lock;
 	int poll_owner;
+	int tries;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
@@ -60,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	return ret;
 }
 
-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
 {
+	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev->npinfo) {
 		spin_lock(&dev->npinfo->poll_lock);
 		dev->npinfo->poll_owner = smp_processor_id();
+		return dev->npinfo;
 	}
+	return NULL;
 }
 
-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
 {
-	if (dev->npinfo) {
-		dev->npinfo->poll_owner = -1;
-		spin_unlock(&dev->npinfo->poll_lock);
+	struct netpoll_info *npi = have;
+
+	if (npi) {
+		npi->poll_owner = -1;
+		spin_unlock(&npi->poll_lock);
 	}
+	rcu_read_unlock();
 }
 
 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
 #define netpoll_poll_unlock(a)
 #endif
 
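Note on the netpoll.h change above: the lock helpers now hand back an opaque cookie so the unlock side never re-reads dev->npinfo, which may have been installed or torn down between the two calls, and the !CONFIG_NETPOLL stub for netpoll_poll_lock() expands to 0 so callers can still assign its result. A minimal sketch of the resulting calling convention, with a hypothetical foo_poll_one() caller that is not part of this patch:

#include <linux/netdevice.h>
#include <linux/netpoll.h>

/* Hypothetical caller, for illustration only. */
static int foo_poll_one(struct net_device *dev, int *budget)
{
	void *have;
	int done;

	have = netpoll_poll_lock(dev);	/* also enters an RCU read-side section */
	done = dev->poll(dev, budget);	/* NAPI poll runs with npinfo pinned */
	netpoll_poll_unlock(have);	/* releases exactly what was taken, then rcu_read_unlock() */

	return done;
}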
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0061c9470482..948527e42a60 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,7 +255,7 @@ struct sk_buff {
 				nohdr:1;
 				/* 3 bits spare */
 	__u8			pkt_type;
-	__u16			protocol;
+	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
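skb->protocol always carries a network-byte-order value; retyping it as __be16 lets sparse flag comparisons against host-order constants. A hedged sketch (hypothetical helper, not from this patch) of the usage the annotation enforces:

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical helper: the comparison must use a network-order constant. */
static int foo_skb_is_ipv4(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP);
}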
diff --git a/net/core/dev.c b/net/core/dev.c
index 52a3bf7ae177..faf59b02c4bf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1696,7 +1696,8 @@ static void net_rx_action(struct softirq_action *h)
 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
 	unsigned long start_time = jiffies;
 	int budget = netdev_budget;
+	void *have;
 
 	local_irq_disable();
 
 	while (!list_empty(&queue->poll_list)) {
@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)
 
 		dev = list_entry(queue->poll_list.next,
 				 struct net_device, poll_list);
-		netpoll_poll_lock(dev);
+		have = netpoll_poll_lock(dev);
 
 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			local_irq_disable();
 			list_del(&dev->poll_list);
 			list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
 			else
 				dev->quota = dev->weight;
 		} else {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			dev_put(dev);
 			local_irq_disable();
 		}
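Together with the synchronize_rcu() added in netpoll_setup() further down, the rcu_read_lock() inside netpoll_poll_lock() closes the race between net_rx_action() sampling dev->npinfo and netpoll installing or relying on it. A hedged sketch of the underlying publish-and-wait pattern, using hypothetical foo_* names (the patch itself reads dev->npinfo directly rather than through rcu_dereference()):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Hypothetical shared state, for illustration only. */
struct foo_info {
	spinlock_t lock;
};

static struct foo_info *foo_info_ptr;

/* Reader side, mirroring netpoll_poll_lock(): sample the pointer inside an
 * RCU read-side section and only then take the lock it contains.
 */
static struct foo_info *foo_reader_lock(void)
{
	struct foo_info *info;

	rcu_read_lock();
	info = rcu_dereference(foo_info_ptr);
	if (info)
		spin_lock(&info->lock);
	return info;			/* cookie handed to foo_reader_unlock() */
}

static void foo_reader_unlock(struct foo_info *info)
{
	if (info)
		spin_unlock(&info->lock);
	rcu_read_unlock();
}

/* Writer side, mirroring netpoll_setup(): publish the pointer, then wait for
 * every reader that may have seen the old (NULL) value to finish before
 * assuming all pollers now go through the lock.
 */
static void foo_publish(struct foo_info *info)
{
	rcu_assign_pointer(foo_info_ptr, info);
	synchronize_rcu();
}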
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c327c9edadc5..a1a9a7abff50 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
 
 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -248,14 +249,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	int status;
 	struct netpoll_info *npinfo;
 
-repeat:
-	if(!np || !np->dev || !netif_running(np->dev)) {
+	if (!np || !np->dev || !netif_running(np->dev)) {
 		__kfree_skb(skb);
 		return;
 	}
 
-	/* avoid recursion */
 	npinfo = np->dev->npinfo;
+
+	/* avoid recursion */
 	if (npinfo->poll_owner == smp_processor_id() ||
 	    np->dev->xmit_lock_owner == smp_processor_id()) {
 		if (np->drop)
@@ -265,30 +266,37 @@ repeat:
 		return;
 	}
 
-	spin_lock(&np->dev->xmit_lock);
-	np->dev->xmit_lock_owner = smp_processor_id();
+	do {
+		npinfo->tries--;
+		spin_lock(&np->dev->xmit_lock);
+		np->dev->xmit_lock_owner = smp_processor_id();
 
 	/*
 	 * network drivers do not expect to be called if the queue is
 	 * stopped.
 	 */
 	if (netif_queue_stopped(np->dev)) {
+			np->dev->xmit_lock_owner = -1;
+			spin_unlock(&np->dev->xmit_lock);
+			netpoll_poll(np);
+			udelay(50);
+			continue;
+		}
+
+		status = np->dev->hard_start_xmit(skb, np->dev);
 		np->dev->xmit_lock_owner = -1;
 		spin_unlock(&np->dev->xmit_lock);
 
-		netpoll_poll(np);
-		goto repeat;
-	}
-
-	status = np->dev->hard_start_xmit(skb, np->dev);
-	np->dev->xmit_lock_owner = -1;
-	spin_unlock(&np->dev->xmit_lock);
+		/* success */
+		if(!status) {
+			npinfo->tries = MAX_RETRIES; /* reset */
+			return;
+		}
 
 	/* transmit busy */
-	if(status) {
 		netpoll_poll(np);
-		goto repeat;
-	}
+		udelay(50);
+	} while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -349,15 +357,11 @@ static void arp_reply(struct sk_buff *skb)
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
 	u32 sip, tip;
-	unsigned long flags;
 	struct sk_buff *send_skb;
 	struct netpoll *np = NULL;
 
-	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
 		np = npinfo->rx_np;
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
 	if (!np)
 		return;
 
@@ -639,9 +643,11 @@ int netpoll_setup(struct netpoll *np)
 		if (!npinfo)
 			goto release;
 
+		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 		npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
 		npinfo->poll_owner = -1;
+		npinfo->tries = MAX_RETRIES;
 		npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
 	} else
 		npinfo = ndev->npinfo;
@@ -718,9 +724,16 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
+
+	/* fill up the skb queue */
+	refill_skbs();
+
 	/* last thing to do is link it to the net device structure */
 	ndev->npinfo = npinfo;
 
+	/* avoid racing with NAPI reading npinfo */
+	synchronize_rcu();
+
 	return 0;
 
 release:
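The netpoll_send_skb() rework above replaces an unbounded goto-repeat loop with a counted do/while: each attempt decrements a per-device budget (npinfo->tries), a successful transmit refills it, and a short udelay() backs off between attempts, so a wedged queue eventually drops the packet instead of hanging the caller. A hedged, self-contained sketch of that shape with hypothetical foo_* names:

#include <linux/delay.h>

#define FOO_MAX_RETRIES 20000		/* hypothetical bound, mirroring MAX_RETRIES */

/* Hypothetical transmit hook: 0 on success, nonzero while the queue is busy. */
static int (*foo_hard_xmit)(void *payload);

static void foo_send_bounded(void *payload, int *tries)
{
	do {
		(*tries)--;

		if (foo_hard_xmit(payload) == 0) {
			*tries = FOO_MAX_RETRIES;	/* success: refill the shared budget */
			return;
		}

		udelay(50);			/* brief backoff before the next attempt */
	} while (*tries > 0);
	/* budget exhausted: drop rather than spin forever */
}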