author		Herbert Xu <herbert@gondor.apana.org.au>	2007-10-14 03:38:15 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-15 15:26:25 -0400
commit		1706d58763c36133d7fce6cc78b1444fd40db28c
tree		e581d13cb65d85aecf795783ad0cc1e1452d84d2 /net/ipv4
parent		e0053ec07e32ec94535c47b10af3377255f00836
[IPV4]: Make ip_defrag return the same packet
This patch is a bit of a hack. However, it is worth it if you consider
that this is the only reason why we have to carry around the struct
sk_buff ** pointers in netfilter.

It makes ip_defrag always return the packet that was given to it on
input. It does this by cloning the packet and replacing its original
contents with the head fragment if necessary.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
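The core of the trick is the head swap added to ip_frag_reasm() in the
diff below. Restated here as a standalone helper for readability — the
name make_received_skb_head() is invented for this sketch; the body
mirrors the hunk in the patch:

	/* Sketch of the head swap from ip_frag_reasm(). prev->next is the
	 * fragment that ip_frag_queue() just linked in, i.e. the skb the
	 * caller originally passed to ip_defrag(). Returns 0 on success,
	 * -ENOMEM if no clone could be allocated. */
	static int make_received_skb_head(struct ipq *qp, struct sk_buff *prev)
	{
		struct sk_buff *head = prev->next;	/* skb the caller passed in */
		struct sk_buff *fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			return -ENOMEM;

		/* The clone takes the received skb's slot in the fragment list. */
		fp->next = head->next;
		prev->next = fp;

		/* skb_morph() (added by the parent commit) gives the received skb
		 * the old head's contents, so the caller's pointer now names the
		 * head of the reassembled datagram. */
		skb_morph(head, qp->fragments);
		head->next = qp->fragments->next;

		kfree_skb(qp->fragments);
		qp->fragments = head;
		return 0;
	}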
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/ip_fragment.c | 76
 1 file changed, 55 insertions(+), 21 deletions(-)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index fabb86db763b..d7fa2bf3a0c1 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -109,6 +109,9 @@ static u32 ipfrag_hash_rnd;
 static LIST_HEAD(ipq_lru_list);
 int ip_frag_nqueues = 0;
 
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+			 struct net_device *dev);
+
 static __inline__ void __ipq_unlink(struct ipq *qp)
 {
 	hlist_del(&qp->list);
@@ -464,17 +467,20 @@ static int ip_frag_reinit(struct ipq *qp)
 }
 
 /* Add new segment to existing queue. */
-static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 {
 	struct sk_buff *prev, *next;
+	struct net_device *dev;
 	int flags, offset;
 	int ihl, end;
+	int err = -ENOENT;
 
 	if (qp->last_in & COMPLETE)
 		goto err;
 
 	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
-	    unlikely(ip_frag_too_far(qp)) && unlikely(ip_frag_reinit(qp))) {
+	    unlikely(ip_frag_too_far(qp)) &&
+	    unlikely(err = ip_frag_reinit(qp))) {
 		ipq_kill(qp);
 		goto err;
 	}
@@ -487,6 +493,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 
 	/* Determine the position of this fragment. */
 	end = offset + skb->len - ihl;
+	err = -EINVAL;
 
 	/* Is this the final fragment? */
 	if ((flags & IP_MF) == 0) {
@@ -514,9 +521,12 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	if (end == offset)
 		goto err;
 
+	err = -ENOMEM;
 	if (pskb_pull(skb, ihl) == NULL)
 		goto err;
-	if (pskb_trim_rcsum(skb, end-offset))
+
+	err = pskb_trim_rcsum(skb, end - offset);
+	if (err)
 		goto err;
 
 	/* Find out which fragments are in front and at the back of us
@@ -539,8 +549,10 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 
 		if (i > 0) {
 			offset += i;
+			err = -EINVAL;
 			if (end <= offset)
 				goto err;
+			err = -ENOMEM;
 			if (!pskb_pull(skb, i))
 				goto err;
 			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
@@ -548,6 +560,8 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		}
 	}
 
+	err = -ENOMEM;
+
 	while (next && FRAG_CB(next)->offset < end) {
 		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
 
@@ -589,37 +603,62 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	else
 		qp->fragments = skb;
 
-	if (skb->dev)
-		qp->iif = skb->dev->ifindex;
-	skb->dev = NULL;
+	dev = skb->dev;
+	if (dev) {
+		qp->iif = dev->ifindex;
+		skb->dev = NULL;
+	}
 	qp->stamp = skb->tstamp;
 	qp->meat += skb->len;
 	atomic_add(skb->truesize, &ip_frag_mem);
 	if (offset == 0)
 		qp->last_in |= FIRST_IN;
 
+	if (qp->last_in == (FIRST_IN | LAST_IN) && qp->meat == qp->len)
+		return ip_frag_reasm(qp, prev, dev);
+
 	write_lock(&ipfrag_lock);
 	list_move_tail(&qp->lru_list, &ipq_lru_list);
 	write_unlock(&ipfrag_lock);
-
-	return;
+	return -EINPROGRESS;
 
 err:
 	kfree_skb(skb);
+	return err;
 }
 
 
 /* Build a new IP datagram from all its fragments. */
 
-static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+			 struct net_device *dev)
 {
 	struct iphdr *iph;
 	struct sk_buff *fp, *head = qp->fragments;
 	int len;
 	int ihlen;
+	int err;
 
 	ipq_kill(qp);
 
+	/* Make the one we just received the head. */
+	if (prev) {
+		head = prev->next;
+		fp = skb_clone(head, GFP_ATOMIC);
+
+		if (!fp)
+			goto out_nomem;
+
+		fp->next = head->next;
+		prev->next = fp;
+
+		skb_morph(head, qp->fragments);
+		head->next = qp->fragments->next;
+
+		kfree_skb(qp->fragments);
+		qp->fragments = head;
+	}
+
 	BUG_TRAP(head != NULL);
 	BUG_TRAP(FRAG_CB(head)->offset == 0);
 
@@ -627,10 +666,12 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
 	ihlen = ip_hdrlen(head);
 	len = ihlen + qp->len;
 
+	err = -E2BIG;
 	if (len > 65535)
 		goto out_oversize;
 
 	/* Head of list must not be cloned. */
+	err = -ENOMEM;
 	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
 		goto out_nomem;
 
@@ -681,7 +722,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
 	iph->tot_len = htons(len);
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
 	qp->fragments = NULL;
-	return head;
+	return 0;
 
 out_nomem:
 	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
@@ -694,14 +735,13 @@ out_oversize:
 		       NIPQUAD(qp->saddr));
 out_fail:
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
-	return NULL;
+	return err;
 }
 
 /* Process an incoming IP datagram fragment. */
 struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
 {
 	struct ipq *qp;
-	struct net_device *dev;
 
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
 
@@ -709,23 +749,17 @@ struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
 	if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
 		ip_evictor();
 
-	dev = skb->dev;
-
 	/* Lookup (or create) queue header */
 	if ((qp = ip_find(ip_hdr(skb), user)) != NULL) {
-		struct sk_buff *ret = NULL;
+		int ret;
 
 		spin_lock(&qp->lock);
 
-		ip_frag_queue(qp, skb);
-
-		if (qp->last_in == (FIRST_IN|LAST_IN) &&
-		    qp->meat == qp->len)
-			ret = ip_frag_reasm(qp, dev);
+		ret = ip_frag_queue(qp, skb);
 
 		spin_unlock(&qp->lock);
 		ipq_put(qp, NULL);
-		return ret;
+		return ret ? NULL : skb;
 	}
 
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
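With this change the visible contract of ip_defrag() becomes: NULL means
the fragment was consumed (queued for reassembly, or freed on error),
while a non-NULL return is the very same skb that was passed in, now
holding the complete datagram. A caller sketch under that contract
(IP_DEFRAG_LOCAL_DELIVER is one of the existing defrag users; the
surrounding code is illustrative, not from this patch):

	skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER);
	if (!skb)
		return 0;	/* queued (or dropped); nothing to deliver yet */
	/* skb is the buffer we passed in, now the full datagram; any
	 * pointer the caller held to it is still valid, which is what
	 * lets netfilter drop its struct sk_buff ** plumbing. */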