author     Matt Mackall <mpm@selenic.com>        2005-08-11 22:25:54 -0400
committer  David S. Miller <davem@davemloft.net> 2005-08-11 22:25:54 -0400
commit     0db1d6fc1ea051af49ebe03c503d23996a7c5bbb (patch)
tree       6afab02002a46b045a3b8769342ad277402f0d95 /net/core/netpoll.c
parent     f0d3459d0722782c7d9d0e35a1ed0815e75fcde5 (diff)
[NETPOLL]: add retry timeout
Add limited retry logic to netpoll_send_skb. Each time we attempt to send,
we decrement our per-device retry counter; on every successful send, we
reset the counter. We delay 50us between attempts, with up to 20000 retries,
for a total of 1 second. After we've exhausted our retries, subsequent
failed attempts will try only once until reset by success.

Signed-off-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
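The pattern in isolation, as a minimal userspace C sketch (an illustration,
not the kernel code: try_send() and budget are hypothetical stand-ins for the
driver's hard_start_xmit() and npinfo->tries, and usleep() stands in for the
kernel's udelay()):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>               /* usleep() stands in for udelay() */

#define MAX_RETRIES 20000         /* 20000 attempts * 50us = 1 second total */

static int budget = MAX_RETRIES;  /* hypothetical stand-in for npinfo->tries */

/* Hypothetical stand-in for the driver transmit hook; reports busy a few
 * times, then succeeds, to exercise the retry path. */
static bool try_send(void)
{
        static int busy_left = 3;
        return busy_left-- <= 0;
}

static void send_with_retries(void)
{
        do {
                budget--;                 /* charge every attempt */
                if (try_send()) {
                        budget = MAX_RETRIES; /* success: refill the budget */
                        return;
                }
                usleep(50);               /* back off 50us between attempts */
        } while (budget > 0);
        /* Budget exhausted: each later call runs the body exactly once
         * (budget stays <= 0 after the decrement) until a success resets it. */
}

int main(void)
{
        send_with_retries();
        printf("sent; remaining budget = %d\n", budget);
        return 0;
}

The do/while shape is what gives the "try only once" behavior: once the
counter is exhausted, the body still runs a single time per call, so a device
that recovers can refill the budget on its next successful send.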
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--  net/core/netpoll.c | 13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 59ed186e4f4..d09affdbad3 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000

 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -265,7 +266,8 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                 return;
         }

-        while (1) {
+        do {
+                npinfo->tries--;
                 spin_lock(&np->dev->xmit_lock);
                 np->dev->xmit_lock_owner = smp_processor_id();

@@ -277,6 +279,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                         np->dev->xmit_lock_owner = -1;
                         spin_unlock(&np->dev->xmit_lock);
                         netpoll_poll(np);
+                        udelay(50);
                         continue;
                 }

@@ -285,12 +288,15 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                 spin_unlock(&np->dev->xmit_lock);

                 /* success */
-                if(!status)
+                if(!status) {
+                        npinfo->tries = MAX_RETRIES; /* reset */
                         return;
+                }

                 /* transmit busy */
                 netpoll_poll(np);
-        }
+                udelay(50);
+        } while (npinfo->tries > 0);
 }

 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -642,6 +648,7 @@ int netpoll_setup(struct netpoll *np)
                 npinfo->rx_np = NULL;
                 npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
                 npinfo->poll_owner = -1;
+                npinfo->tries = MAX_RETRIES;
                 npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
         } else
                 npinfo = ndev->npinfo;