diff options
author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-04-27 12:26:46 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-04-27 12:26:46 -0400 |
commit | 15c54033964a943de7b0763efd3bd0ede7326395 (patch) | |
tree | 840b292612d1b5396d5bab5bde537a9013db3ceb /net/xfrm/xfrm_algo.c | |
parent | ad5da3cf39a5b11a198929be1f2644e17ecd767e (diff) | |
parent | 912a41a4ab935ce8c4308428ec13fc7f8b1f18f4 (diff) |
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (448 commits)
[IPV4] nl_fib_lookup: Initialise res.r before fib_res_put(&res)
[IPV6]: Fix thinko in ipv6_rthdr_rcv() changes.
[IPV4]: Add multipath cached to feature-removal-schedule.txt
[WIRELESS] cfg80211: Clarify locking comment.
[WIRELESS] cfg80211: Fix locking in wiphy_new.
[WEXT] net_device: Don't include wext bits if not required.
[WEXT]: Misc code cleanups.
[WEXT]: Reduce inline abuse.
[WEXT]: Move EXPORT_SYMBOL statements where they belong.
[WEXT]: Cleanup early ioctl call path.
[WEXT]: Remove options.
[WEXT]: Remove dead debug code.
[WEXT]: Clean up how wext is called.
[WEXT]: Move to net/wireless
[AFS]: Eliminate cmpxchg() usage in vlocation code.
[RXRPC]: Fix pointers passed to bitops.
[RXRPC]: Remove bogus atomic_* overrides.
[AFS]: Fix u64 printing in debug logging.
[AFS]: Add "directory write" support.
[AFS]: Implement the CB.InitCallBackState3 operation.
...
Diffstat (limited to 'net/xfrm/xfrm_algo.c')
-rw-r--r-- | net/xfrm/xfrm_algo.c | 191 |
1 file changed, 8 insertions, 183 deletions
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index f373a8a7d9c8..be529c4241a6 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c | |||
@@ -532,8 +532,8 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported); | |||
532 | int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | 532 | int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, |
533 | int offset, int len, icv_update_fn_t icv_update) | 533 | int offset, int len, icv_update_fn_t icv_update) |
534 | { | 534 | { |
535 | int start = skb_headlen(skb); | 535 | int end = skb_headlen(skb); |
536 | int i, copy = start - offset; | 536 | int i, copy = end - offset; |
537 | int err; | 537 | int err; |
538 | struct scatterlist sg; | 538 | struct scatterlist sg; |
539 | 539 | ||
@@ -556,11 +556,9 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
556 | } | 556 | } |
557 | 557 | ||
558 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 558 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
559 | int end; | 559 | BUG_TRAP(len >= 0); |
560 | 560 | ||
561 | BUG_TRAP(start <= offset + len); | 561 | end = offset + skb_shinfo(skb)->frags[i].size; |
562 | |||
563 | end = start + skb_shinfo(skb)->frags[i].size; | ||
564 | if ((copy = end - offset) > 0) { | 562 | if ((copy = end - offset) > 0) { |
565 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 563 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
566 | 564 | ||
@@ -568,7 +566,7 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
568 | copy = len; | 566 | copy = len; |
569 | 567 | ||
570 | sg.page = frag->page; | 568 | sg.page = frag->page; |
571 | sg.offset = frag->page_offset + offset-start; | 569 | sg.offset = frag->page_offset; |
572 | sg.length = copy; | 570 | sg.length = copy; |
573 | 571 | ||
574 | err = icv_update(desc, &sg, copy); | 572 | err = icv_update(desc, &sg, copy); |
@@ -579,22 +577,19 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
579 | return 0; | 577 | return 0; |
580 | offset += copy; | 578 | offset += copy; |
581 | } | 579 | } |
582 | start = end; | ||
583 | } | 580 | } |
584 | 581 | ||
585 | if (skb_shinfo(skb)->frag_list) { | 582 | if (skb_shinfo(skb)->frag_list) { |
586 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 583 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
587 | 584 | ||
588 | for (; list; list = list->next) { | 585 | for (; list; list = list->next) { |
589 | int end; | 586 | BUG_TRAP(len >= 0); |
590 | |||
591 | BUG_TRAP(start <= offset + len); | ||
592 | 587 | ||
593 | end = start + list->len; | 588 | end = offset + list->len; |
594 | if ((copy = end - offset) > 0) { | 589 | if ((copy = end - offset) > 0) { |
595 | if (copy > len) | 590 | if (copy > len) |
596 | copy = len; | 591 | copy = len; |
597 | err = skb_icv_walk(list, desc, offset-start, | 592 | err = skb_icv_walk(list, desc, 0, |
598 | copy, icv_update); | 593 | copy, icv_update); |
599 | if (unlikely(err)) | 594 | if (unlikely(err)) |
600 | return err; | 595 | return err; |
@@ -602,7 +597,6 @@ int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, | |||
602 | return 0; | 597 | return 0; |
603 | offset += copy; | 598 | offset += copy; |
604 | } | 599 | } |
605 | start = end; | ||
606 | } | 600 | } |
607 | } | 601 | } |
608 | BUG_ON(len); | 602 | BUG_ON(len); |
@@ -612,175 +606,6 @@ EXPORT_SYMBOL_GPL(skb_icv_walk); | |||
612 | 606 | ||
613 | #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) | 607 | #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) |
614 | 608 | ||
615 | /* Looking generic it is not used in another places. */ | ||
616 | |||
617 | int | ||
618 | skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | ||
619 | { | ||
620 | int start = skb_headlen(skb); | ||
621 | int i, copy = start - offset; | ||
622 | int elt = 0; | ||
623 | |||
624 | if (copy > 0) { | ||
625 | if (copy > len) | ||
626 | copy = len; | ||
627 | sg[elt].page = virt_to_page(skb->data + offset); | ||
628 | sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; | ||
629 | sg[elt].length = copy; | ||
630 | elt++; | ||
631 | if ((len -= copy) == 0) | ||
632 | return elt; | ||
633 | offset += copy; | ||
634 | } | ||
635 | |||
636 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
637 | int end; | ||
638 | |||
639 | BUG_TRAP(start <= offset + len); | ||
640 | |||
641 | end = start + skb_shinfo(skb)->frags[i].size; | ||
642 | if ((copy = end - offset) > 0) { | ||
643 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
644 | |||
645 | if (copy > len) | ||
646 | copy = len; | ||
647 | sg[elt].page = frag->page; | ||
648 | sg[elt].offset = frag->page_offset+offset-start; | ||
649 | sg[elt].length = copy; | ||
650 | elt++; | ||
651 | if (!(len -= copy)) | ||
652 | return elt; | ||
653 | offset += copy; | ||
654 | } | ||
655 | start = end; | ||
656 | } | ||
657 | |||
658 | if (skb_shinfo(skb)->frag_list) { | ||
659 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | ||
660 | |||
661 | for (; list; list = list->next) { | ||
662 | int end; | ||
663 | |||
664 | BUG_TRAP(start <= offset + len); | ||
665 | |||
666 | end = start + list->len; | ||
667 | if ((copy = end - offset) > 0) { | ||
668 | if (copy > len) | ||
669 | copy = len; | ||
670 | elt += skb_to_sgvec(list, sg+elt, offset - start, copy); | ||
671 | if ((len -= copy) == 0) | ||
672 | return elt; | ||
673 | offset += copy; | ||
674 | } | ||
675 | start = end; | ||
676 | } | ||
677 | } | ||
678 | BUG_ON(len); | ||
679 | return elt; | ||
680 | } | ||
681 | EXPORT_SYMBOL_GPL(skb_to_sgvec); | ||
682 | |||
683 | /* Check that skb data bits are writable. If they are not, copy data | ||
684 | * to newly created private area. If "tailbits" is given, make sure that | ||
685 | * tailbits bytes beyond current end of skb are writable. | ||
686 | * | ||
687 | * Returns amount of elements of scatterlist to load for subsequent | ||
688 | * transformations and pointer to writable trailer skb. | ||
689 | */ | ||
690 | |||
691 | int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | ||
692 | { | ||
693 | int copyflag; | ||
694 | int elt; | ||
695 | struct sk_buff *skb1, **skb_p; | ||
696 | |||
697 | /* If skb is cloned or its head is paged, reallocate | ||
698 | * head pulling out all the pages (pages are considered not writable | ||
699 | * at the moment even if they are anonymous). | ||
700 | */ | ||
701 | if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && | ||
702 | __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) | ||
703 | return -ENOMEM; | ||
704 | |||
705 | /* Easy case. Most of packets will go this way. */ | ||
706 | if (!skb_shinfo(skb)->frag_list) { | ||
707 | /* A little of trouble, not enough of space for trailer. | ||
708 | * This should not happen, when stack is tuned to generate | ||
709 | * good frames. OK, on miss we reallocate and reserve even more | ||
710 | * space, 128 bytes is fair. */ | ||
711 | |||
712 | if (skb_tailroom(skb) < tailbits && | ||
713 | pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) | ||
714 | return -ENOMEM; | ||
715 | |||
716 | /* Voila! */ | ||
717 | *trailer = skb; | ||
718 | return 1; | ||
719 | } | ||
720 | |||
721 | /* Misery. We are in troubles, going to mincer fragments... */ | ||
722 | |||
723 | elt = 1; | ||
724 | skb_p = &skb_shinfo(skb)->frag_list; | ||
725 | copyflag = 0; | ||
726 | |||
727 | while ((skb1 = *skb_p) != NULL) { | ||
728 | int ntail = 0; | ||
729 | |||
730 | /* The fragment is partially pulled by someone, | ||
731 | * this can happen on input. Copy it and everything | ||
732 | * after it. */ | ||
733 | |||
734 | if (skb_shared(skb1)) | ||
735 | copyflag = 1; | ||
736 | |||
737 | /* If the skb is the last, worry about trailer. */ | ||
738 | |||
739 | if (skb1->next == NULL && tailbits) { | ||
740 | if (skb_shinfo(skb1)->nr_frags || | ||
741 | skb_shinfo(skb1)->frag_list || | ||
742 | skb_tailroom(skb1) < tailbits) | ||
743 | ntail = tailbits + 128; | ||
744 | } | ||
745 | |||
746 | if (copyflag || | ||
747 | skb_cloned(skb1) || | ||
748 | ntail || | ||
749 | skb_shinfo(skb1)->nr_frags || | ||
750 | skb_shinfo(skb1)->frag_list) { | ||
751 | struct sk_buff *skb2; | ||
752 | |||
753 | /* Fuck, we are miserable poor guys... */ | ||
754 | if (ntail == 0) | ||
755 | skb2 = skb_copy(skb1, GFP_ATOMIC); | ||
756 | else | ||
757 | skb2 = skb_copy_expand(skb1, | ||
758 | skb_headroom(skb1), | ||
759 | ntail, | ||
760 | GFP_ATOMIC); | ||
761 | if (unlikely(skb2 == NULL)) | ||
762 | return -ENOMEM; | ||
763 | |||
764 | if (skb1->sk) | ||
765 | skb_set_owner_w(skb2, skb1->sk); | ||
766 | |||
767 | /* Looking around. Are we still alive? | ||
768 | * OK, link new skb, drop old one */ | ||
769 | |||
770 | skb2->next = skb1->next; | ||
771 | *skb_p = skb2; | ||
772 | kfree_skb(skb1); | ||
773 | skb1 = skb2; | ||
774 | } | ||
775 | elt++; | ||
776 | *trailer = skb1; | ||
777 | skb_p = &skb1->next; | ||
778 | } | ||
779 | |||
780 | return elt; | ||
781 | } | ||
782 | EXPORT_SYMBOL_GPL(skb_cow_data); | ||
783 | |||
784 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) | 609 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) |
785 | { | 610 | { |
786 | if (tail != skb) { | 611 | if (tail != skb) { |