author	Rusty Russell <rusty@rustcorp.com.au>	2008-08-15 18:15:10 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-15 22:52:31 -0400
commit	f42157cb568c1eb02eca7df4da67553a9edae24a (patch)
tree	f8c104a73ae5558e104a07e254a9eafd7839269d
parent	db543c1f973cd1d557cc32ceee76737c1e4d2898 (diff)
tun: fallback if skb_alloc() fails on big packets
alloc_skb() produces linear packets (using kmalloc()). That can fail, so we
should fall back to making paged skbs.

My original version of this patch always allocated paged skbs for big
packets. But that made performance drop from 8.4 seconds to 8.8 seconds on
1G lguest->Host TCP xmit. So now we only do that as a fallback.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Max Krasnyansky <maxk@qualcomm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/tun.c	66
1 file changed, 62 insertions(+), 4 deletions(-)
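Before reading the patch itself, the allocation strategy it adds can be seen
in miniature below: try one contiguous buffer first, and only fall back to a
small linear part plus page-sized chunks when that fails. This is a rough
userspace sketch for illustration only; struct chunked_buf, buf_alloc(),
buf_free(), and MAX_CHUNKS are hypothetical names standing in for the
kernel's sk_buff machinery, and prepad handling is omitted.

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	#define MAX_CHUNKS 17	/* stands in for MAX_SKB_FRAGS; value illustrative */

	struct chunked_buf {
		void *linear;			/* contiguous part (headers, or everything) */
		size_t linear_len;
		void *page[MAX_CHUNKS];		/* page-sized pieces for the remainder */
		unsigned int nr_pages;
	};

	static void buf_free(struct chunked_buf *b)
	{
		if (!b)
			return;
		while (b->nr_pages)
			free(b->page[--b->nr_pages]);
		free(b->linear);
		free(b);
	}

	/* Mirrors the patch's logic: one big allocation on the fast path, a
	 * linear header area plus page-sized chunks on the fallback path.
	 * Assumes linear <= len, as tun_alloc_skb() does. */
	static struct chunked_buf *buf_alloc(size_t len, size_t linear)
	{
		size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
		struct chunked_buf *b = calloc(1, sizeof(*b));

		if (!b)
			return NULL;

		/* Fast path: the whole packet in one linear buffer. */
		b->linear = malloc(len);
		if (b->linear) {
			b->linear_len = len;
			return b;
		}

		/* Fallback: linear part for the headers, pages for the payload. */
		b->linear = malloc(linear);
		if (!b->linear) {
			free(b);
			return NULL;
		}
		b->linear_len = linear;
		len -= linear;

		while (len && b->nr_pages < MAX_CHUNKS) {
			b->page[b->nr_pages] = calloc(1, page_size);
			if (!b->page[b->nr_pages])
				break;
			b->nr_pages++;
			len = len < page_size ? 0 : len - page_size;
		}

		if (len) {	/* too large, or a chunk allocation failed */
			buf_free(b);
			return NULL;
		}
		return b;
	}

	int main(void)
	{
		/* e.g. a 64 KiB packet with 66 bytes of headers kept linear */
		struct chunked_buf *b = buf_alloc(64 * 1024, 66);

		if (b)
			printf("linear=%zu bytes, %u page chunk(s)\n",
			       b->linear_len, b->nr_pages);
		buf_free(b);
		return 0;
	}

As in the patch, a failed chunk allocation or a packet too big for
MAX_CHUNKS pages frees everything and reports failure to the caller rather
than returning a short buffer.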
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 95931a5a9883..6daea0c91862 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -358,6 +358,66 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 	return mask;
 }
 
+/* prepad is the amount to reserve at front.  len is length after that.
+ * linear is a hint as to how much to copy (usually headers). */
+static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear,
+				     gfp_t gfp)
+{
+	struct sk_buff *skb;
+	unsigned int i;
+
+	skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN);
+	if (skb) {
+		skb_reserve(skb, prepad);
+		skb_put(skb, len);
+		return skb;
+	}
+
+	/* Under a page?  Don't bother with paged skb. */
+	if (prepad + len < PAGE_SIZE)
+		return NULL;
+
+	/* Start with a normal skb, and add pages. */
+	skb = alloc_skb(prepad + linear, gfp);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, prepad);
+	skb_put(skb, linear);
+
+	len -= linear;
+
+	for (i = 0; i < MAX_SKB_FRAGS; i++) {
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+		f->page = alloc_page(gfp|__GFP_ZERO);
+		if (!f->page)
+			break;
+
+		f->page_offset = 0;
+		f->size = PAGE_SIZE;
+
+		skb->data_len += PAGE_SIZE;
+		skb->len += PAGE_SIZE;
+		skb->truesize += PAGE_SIZE;
+		skb_shinfo(skb)->nr_frags++;
+
+		if (len < PAGE_SIZE) {
+			len = 0;
+			break;
+		}
+		len -= PAGE_SIZE;
+	}
+
+	/* Too large, or alloc fail? */
+	if (unlikely(len)) {
+		kfree_skb(skb);
+		skb = NULL;
+	}
+
+	return skb;
+}
+
 /* Get packet from user space buffer */
 static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
 {
@@ -391,14 +451,12 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
 		return -EINVAL;
 	}
 
-	if (!(skb = alloc_skb(len + align, GFP_KERNEL))) {
+	if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) {
 		tun->dev->stats.rx_dropped++;
 		return -ENOMEM;
 	}
 
-	if (align)
-		skb_reserve(skb, align);
-	if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
+	if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
 		tun->dev->stats.rx_dropped++;
 		kfree_skb(skb);
 		return -EFAULT;
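
The copy-in switch in the second hunk goes hand in hand with the new
allocator: memcpy_fromiovec() can only fill an skb's linear data area, so it
would not work for a paged skb built by the fallback path in
tun_alloc_skb(). skb_copy_datagram_from_iovec(), introduced by the parent
commit referenced above, walks the frag pages as well as the linear region,
so a single call covers both skb shapes. It also makes the explicit
skb_reserve(skb, align) unnecessary here, since tun_alloc_skb() already
reserves the prepad on both paths.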