author    Stephen Hemminger <shemminger@osdl.org>    2006-10-26 18:46:54 -0400
committer David S. Miller <davem@sunset.davemloft.net>    2006-12-03 00:22:36 -0500
commit    2bdfe0baeca0e2750037b8fba71905c00ac3c515 (patch)
tree      feb34f19d0f280fe4b54bc49e49ab05ea7f085c7
parent    6c43ff18f91e54aa7555d8ae4f26eab7da5bce68 (diff)
netpoll retry cleanup
The netpoll beast was still not happy. If the beast got clogged pipes, it
tended to stare blankly off in space for a long time.

The problem couldn't be completely fixed because the beast talked with
irq's disabled. But it could be made less painful and shorter.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
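For scale, the change bounds the transmit retry loop in netpoll_send_skb() to roughly one clock tick instead of a fixed 20000 attempts. A minimal userspace sketch of that arithmetic (not part of the patch; the HZ values are illustrative assumptions, USEC_PER_POLL and the old MAX_RETRIES come from the diff below):

/* sketch: worst-case stall of the old vs. new retry loop in netpoll_send_skb() */
#include <stdio.h>

#define USEC_PER_POLL 50      /* delay between transmit attempts, from the patch */
#define OLD_MAX_RETRIES 20000 /* the MAX_RETRIES constant the patch removes */

/* attempts allowed per tick: jiffies_to_usecs(1) / USEC_PER_POLL */
static unsigned long tries_per_tick(unsigned long hz)
{
        return (1000000UL / hz) / USEC_PER_POLL;
}

int main(void)
{
        /* old behaviour: up to 20000 * 50us = ~1 second spinning with IRQs off */
        printf("old worst case: ~%lu ms\n", OLD_MAX_RETRIES * USEC_PER_POLL / 1000UL);

        /* new behaviour: bounded by one tick, e.g. 20 tries at HZ=1000, 80 at HZ=250 */
        printf("HZ=1000: %lu tries (~%lu us)\n",
               tries_per_tick(1000), tries_per_tick(1000) * USEC_PER_POLL);
        printf("HZ=250:  %lu tries (~%lu us)\n",
               tries_per_tick(250), tries_per_tick(250) * USEC_PER_POLL);
        return 0;
}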
-rw-r--r--   include/linux/netpoll.h |  1
-rw-r--r--   net/core/netpoll.c      | 71
2 files changed, 33 insertions(+), 39 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 93a8b7664423..c65d12ec7bb0 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -28,7 +28,6 @@ struct netpoll_info {
         atomic_t refcnt;
         spinlock_t poll_lock;
         int poll_owner;
-        int tries;
         int rx_flags;
         spinlock_t rx_lock;
         struct netpoll *rx_np; /* netpoll that registered an rx_hook */
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 93cb828f3aaf..6b34c394672f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -34,12 +34,12 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
-#define MAX_RETRIES 20000
 
 static struct sk_buff_head skb_pool;
 
 static atomic_t trapped;
 
+#define USEC_PER_POLL 50
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
 
@@ -72,6 +72,7 @@ static void queue_process(void *p)
                         schedule_delayed_work(&npinfo->tx_work, HZ/10);
                         return;
                 }
+
                 netif_tx_unlock_bh(dev);
         }
 }
@@ -244,50 +245,44 @@ repeat:
 
 static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
-        int status;
-        struct netpoll_info *npinfo;
+        int status = NETDEV_TX_BUSY;
+        unsigned long tries;
+        struct net_device *dev = np->dev;
+        struct netpoll_info *npinfo = np->dev->npinfo;
+
+        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+                __kfree_skb(skb);
+                return;
+        }
+
+        /* don't get messages out of order, and no recursion */
+        if ( !(np->drop == netpoll_queue && skb_queue_len(&npinfo->txq))
+             && npinfo->poll_owner != smp_processor_id()
+             && netif_tx_trylock(dev)) {
+
+                /* try until next clock tick */
+                for(tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
+                        if (!netif_queue_stopped(dev))
+                                status = dev->hard_start_xmit(skb, dev);
+
+                        if (status == NETDEV_TX_OK)
+                                break;
 
-        if (!np || !np->dev || !netif_running(np->dev)) {
-                __kfree_skb(skb);
-                return;
-        }
+                        /* tickle device maybe there is some cleanup */
+                        netpoll_poll(np);
 
-        npinfo = np->dev->npinfo;
+                        udelay(USEC_PER_POLL);
+                }
+                netif_tx_unlock(dev);
+        }
 
-        /* avoid recursion */
-        if (npinfo->poll_owner == smp_processor_id() ||
-            np->dev->xmit_lock_owner == smp_processor_id()) {
+        if (status != NETDEV_TX_OK) {
+                /* requeue for later */
                 if (np->drop)
                         np->drop(skb);
                 else
                         __kfree_skb(skb);
-                return;
         }
-
-        do {
-                npinfo->tries--;
-                netif_tx_lock(np->dev);
-
-                /*
-                 * network drivers do not expect to be called if the queue is
-                 * stopped.
-                 */
-                status = NETDEV_TX_BUSY;
-                if (!netif_queue_stopped(np->dev))
-                        status = np->dev->hard_start_xmit(skb, np->dev);
-
-                netif_tx_unlock(np->dev);
-
-                /* success */
-                if(!status) {
-                        npinfo->tries = MAX_RETRIES; /* reset */
-                        return;
-                }
-
-                /* transmit busy */
-                netpoll_poll(np);
-                udelay(50);
-        } while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -649,7 +644,7 @@ int netpoll_setup(struct netpoll *np)
         npinfo->rx_np = NULL;
         spin_lock_init(&npinfo->poll_lock);
         npinfo->poll_owner = -1;
-        npinfo->tries = MAX_RETRIES;
+
         spin_lock_init(&npinfo->rx_lock);
         skb_queue_head_init(&npinfo->arp_tx);
         skb_queue_head_init(&npinfo->txq);