Diffstat (limited to 'net')
 -rw-r--r--   net/core/skbuff.c    | 188
 -rw-r--r--   net/xfrm/xfrm_algo.c | 169
 2 files changed, 188 insertions(+), 169 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 331d3efa82fa..f927b6e8027e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -55,6 +55,7 @@
 #include <linux/cache.h>
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
+#include <linux/scatterlist.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
@@ -2002,6 +2003,190 @@ void __init skb_init(void)
 						NULL, NULL);
 }
 
+/**
+ * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ * @skb: Socket buffer containing the buffers to be mapped
+ * @sg: The scatter-gather list to map into
+ * @offset: The offset into the buffer's contents to start mapping
+ * @len: Length of buffer space to be mapped
+ *
+ * Fill the specified scatter-gather list with mappings/pointers into a
+ * region of the buffer space attached to a socket buffer.
+ */
+int
+skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
+	int elt = 0;
+
+	if (copy > 0) {
+		if (copy > len)
+			copy = len;
+		sg[elt].page = virt_to_page(skb->data + offset);
+		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
+		sg[elt].length = copy;
+		elt++;
+		if ((len -= copy) == 0)
+			return elt;
+		offset += copy;
+	}
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		int end;
+
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
+		if ((copy = end - offset) > 0) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+			if (copy > len)
+				copy = len;
+			sg[elt].page = frag->page;
+			sg[elt].offset = frag->page_offset+offset-start;
+			sg[elt].length = copy;
+			elt++;
+			if (!(len -= copy))
+				return elt;
+			offset += copy;
+		}
+		start = end;
+	}
+
+	if (skb_shinfo(skb)->frag_list) {
+		struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+		for (; list; list = list->next) {
+			int end;
+
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
+			if ((copy = end - offset) > 0) {
+				if (copy > len)
+					copy = len;
+				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
+				if ((len -= copy) == 0)
+					return elt;
+				offset += copy;
+			}
+			start = end;
+		}
+	}
+	BUG_ON(len);
+	return elt;
+}
+
+/**
+ * skb_cow_data - Check that a socket buffer's data buffers are writable
+ * @skb: The socket buffer to check.
+ * @tailbits: Amount of trailing space to be added
+ * @trailer: Returned pointer to the skb where the @tailbits space begins
+ *
+ * Make sure that the data buffers attached to a socket buffer are
+ * writable. If they are not, private copies are made of the data buffers
+ * and the socket buffer is set to use these instead.
+ *
+ * If @tailbits is given, make sure that there is space to write @tailbits
+ * bytes of data beyond current end of socket buffer. @trailer will be
+ * set to point to the skb in which this space begins.
+ *
+ * The number of scatterlist elements required to completely map the
+ * COW'd and extended socket buffer will be returned.
+ */
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
+{
+	int copyflag;
+	int elt;
+	struct sk_buff *skb1, **skb_p;
+
+	/* If skb is cloned or its head is paged, reallocate
+	 * head pulling out all the pages (pages are considered not writable
+	 * at the moment even if they are anonymous).
+	 */
+	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
+	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
+		return -ENOMEM;
+
+	/* Easy case. Most of packets will go this way. */
+	if (!skb_shinfo(skb)->frag_list) {
+		/* A little of trouble, not enough of space for trailer.
+		 * This should not happen, when stack is tuned to generate
+		 * good frames. OK, on miss we reallocate and reserve even more
+		 * space, 128 bytes is fair. */
+
+		if (skb_tailroom(skb) < tailbits &&
+		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
+			return -ENOMEM;
+
+		/* Voila! */
+		*trailer = skb;
+		return 1;
+	}
+
+	/* Misery. We are in troubles, going to mincer fragments... */
+
+	elt = 1;
+	skb_p = &skb_shinfo(skb)->frag_list;
+	copyflag = 0;
+
+	while ((skb1 = *skb_p) != NULL) {
+		int ntail = 0;
+
+		/* The fragment is partially pulled by someone,
+		 * this can happen on input. Copy it and everything
+		 * after it. */
+
+		if (skb_shared(skb1))
+			copyflag = 1;
+
+		/* If the skb is the last, worry about trailer. */
+
+		if (skb1->next == NULL && tailbits) {
+			if (skb_shinfo(skb1)->nr_frags ||
+			    skb_shinfo(skb1)->frag_list ||
+			    skb_tailroom(skb1) < tailbits)
+				ntail = tailbits + 128;
+		}
+
+		if (copyflag ||
+		    skb_cloned(skb1) ||
+		    ntail ||
+		    skb_shinfo(skb1)->nr_frags ||
+		    skb_shinfo(skb1)->frag_list) {
+			struct sk_buff *skb2;
+
+			/* Fuck, we are miserable poor guys... */
+			if (ntail == 0)
+				skb2 = skb_copy(skb1, GFP_ATOMIC);
+			else
+				skb2 = skb_copy_expand(skb1,
+						       skb_headroom(skb1),
+						       ntail,
+						       GFP_ATOMIC);
+			if (unlikely(skb2 == NULL))
+				return -ENOMEM;
+
+			if (skb1->sk)
+				skb_set_owner_w(skb2, skb1->sk);
+
+			/* Looking around. Are we still alive?
+			 * OK, link new skb, drop old one */
+
+			skb2->next = skb1->next;
+			*skb_p = skb2;
+			kfree_skb(skb1);
+			skb1 = skb2;
+		}
+		elt++;
+		*trailer = skb1;
+		skb_p = &skb1->next;
+	}
+
+	return elt;
+}
+
 EXPORT_SYMBOL(___pskb_trim);
 EXPORT_SYMBOL(__kfree_skb);
 EXPORT_SYMBOL(kfree_skb);
@@ -2036,3 +2221,6 @@ EXPORT_SYMBOL(skb_seq_read);
 EXPORT_SYMBOL(skb_abort_seq_read);
 EXPORT_SYMBOL(skb_find_text);
 EXPORT_SYMBOL(skb_append_datato_frags);
+
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
+EXPORT_SYMBOL_GPL(skb_cow_data);
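
The two newly exported helpers are normally used together by a transform caller: skb_cow_data() makes the buffer writable and reports how many scatterlist entries are needed, and skb_to_sgvec() then fills that scatterlist. The sketch below illustrates the pattern under stated assumptions; it is not part of this patch, example_map_skb() is a hypothetical function name, and the bare scatterlist/kmalloc handling assumes the pre-sg_init_table API of kernels from this era.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: COW the skb, then map it for a crypto transform. */
static int example_map_skb(struct sk_buff *skb, int trailer_len)
{
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nsg;

	/* Make all data writable and reserve trailer_len bytes of tail room.
	 * On success, nsg is the number of scatterlist entries required to
	 * map the COW'd and extended buffer. */
	nsg = skb_cow_data(skb, trailer_len, &trailer);
	if (nsg < 0)
		return nsg;

	sg = kmalloc(nsg * sizeof(struct scatterlist), GFP_ATOMIC);
	if (sg == NULL)
		return -ENOMEM;

	/* Fill sg with pointers into the (now writable) skb data. */
	skb_to_sgvec(skb, sg, 0, skb->len);

	/* ... hand sg to a hash/cipher transform here ... */

	kfree(sg);
	return 0;
}

The skb_cow_data() return value sizing the scatterlist consumed by skb_to_sgvec() is presumably why the two helpers move and are exported together.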
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index f373a8a7d9c8..6249a9405bb8 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -612,175 +612,6 @@ EXPORT_SYMBOL_GPL(skb_icv_walk);
 
 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
 
-/* Looking generic it is not used in another places. */
-
-int
-skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
-{
-	int start = skb_headlen(skb);
-	int i, copy = start - offset;
-	int elt = 0;
-
-	if (copy > 0) {
-		if (copy > len)
-			copy = len;
-		sg[elt].page = virt_to_page(skb->data + offset);
-		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
-		sg[elt].length = copy;
-		elt++;
-		if ((len -= copy) == 0)
-			return elt;
-		offset += copy;
-	}
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		int end;
-
-		BUG_TRAP(start <= offset + len);
-
-		end = start + skb_shinfo(skb)->frags[i].size;
-		if ((copy = end - offset) > 0) {
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-			if (copy > len)
-				copy = len;
-			sg[elt].page = frag->page;
-			sg[elt].offset = frag->page_offset+offset-start;
-			sg[elt].length = copy;
-			elt++;
-			if (!(len -= copy))
-				return elt;
-			offset += copy;
-		}
-		start = end;
-	}
-
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			BUG_TRAP(start <= offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
-				if ((len -= copy) == 0)
-					return elt;
-				offset += copy;
-			}
-			start = end;
-		}
-	}
-	BUG_ON(len);
-	return elt;
-}
-EXPORT_SYMBOL_GPL(skb_to_sgvec);
-
-/* Check that skb data bits are writable. If they are not, copy data
- * to newly created private area. If "tailbits" is given, make sure that
- * tailbits bytes beyond current end of skb are writable.
- *
- * Returns amount of elements of scatterlist to load for subsequent
- * transformations and pointer to writable trailer skb.
- */
-
-int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
-{
-	int copyflag;
-	int elt;
-	struct sk_buff *skb1, **skb_p;
-
-	/* If skb is cloned or its head is paged, reallocate
-	 * head pulling out all the pages (pages are considered not writable
-	 * at the moment even if they are anonymous).
-	 */
-	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
-	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
-		return -ENOMEM;
-
-	/* Easy case. Most of packets will go this way. */
-	if (!skb_shinfo(skb)->frag_list) {
-		/* A little of trouble, not enough of space for trailer.
-		 * This should not happen, when stack is tuned to generate
-		 * good frames. OK, on miss we reallocate and reserve even more
-		 * space, 128 bytes is fair. */
-
-		if (skb_tailroom(skb) < tailbits &&
-		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
-			return -ENOMEM;
-
-		/* Voila! */
-		*trailer = skb;
-		return 1;
-	}
-
-	/* Misery. We are in troubles, going to mincer fragments... */
-
-	elt = 1;
-	skb_p = &skb_shinfo(skb)->frag_list;
-	copyflag = 0;
-
-	while ((skb1 = *skb_p) != NULL) {
-		int ntail = 0;
-
-		/* The fragment is partially pulled by someone,
-		 * this can happen on input. Copy it and everything
-		 * after it. */
-
-		if (skb_shared(skb1))
-			copyflag = 1;
-
-		/* If the skb is the last, worry about trailer. */
-
-		if (skb1->next == NULL && tailbits) {
-			if (skb_shinfo(skb1)->nr_frags ||
-			    skb_shinfo(skb1)->frag_list ||
-			    skb_tailroom(skb1) < tailbits)
-				ntail = tailbits + 128;
-		}
-
-		if (copyflag ||
-		    skb_cloned(skb1) ||
-		    ntail ||
-		    skb_shinfo(skb1)->nr_frags ||
-		    skb_shinfo(skb1)->frag_list) {
-			struct sk_buff *skb2;
-
-			/* Fuck, we are miserable poor guys... */
-			if (ntail == 0)
-				skb2 = skb_copy(skb1, GFP_ATOMIC);
-			else
-				skb2 = skb_copy_expand(skb1,
-						       skb_headroom(skb1),
-						       ntail,
-						       GFP_ATOMIC);
-			if (unlikely(skb2 == NULL))
-				return -ENOMEM;
-
-			if (skb1->sk)
-				skb_set_owner_w(skb2, skb1->sk);
-
-			/* Looking around. Are we still alive?
-			 * OK, link new skb, drop old one */
-
-			skb2->next = skb1->next;
-			*skb_p = skb2;
-			kfree_skb(skb1);
-			skb1 = skb2;
-		}
-		elt++;
-		*trailer = skb1;
-		skb_p = &skb1->next;
-	}
-
-	return elt;
-}
-EXPORT_SYMBOL_GPL(skb_cow_data);
-
 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
 {
 	if (tail != skb) {