author		Ian Campbell <Ian.Campbell@citrix.com>	2011-08-22 19:44:58 -0400
committer	David S. Miller <davem@davemloft.net>	2011-08-24 20:52:11 -0400
commit		ea2ab69379a941c6f8884e290fdd28c93936a778 (patch)
tree		c02aa9c5ed66b1640b54bb6b763d131b9419be29 /net/core/skbuff.c
parent		15133fbbb91ae695f153fb48daa6a1a8af4a5032 (diff)
net: convert core to skb paged frag APIs
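
The conversion is mechanical: every place that touched skb_frag_t's page pointer directly (get_page()/put_page() on frag->page, page_address(frag->page)) now goes through the corresponding skb_frag_* accessor. As a minimal sketch of the pattern, using the same accessors as the hunks below (the two helper functions themselves are hypothetical, for illustration only):

#include <linux/skbuff.h>

/* Hypothetical helper: take a reference on every paged frag of an skb. */
static void example_hold_frags(struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		/* was: get_page(skb_shinfo(skb)->frags[i].page); */
		skb_frag_ref(skb, i);
	}
}

/* Hypothetical helper: drop those references again. */
static void example_release_frags(struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		/* was: put_page(skb_shinfo(skb)->frags[i].page); */
		skb_frag_unref(skb, i);
	}
}

The frag-pointer variants __skb_frag_ref()/__skb_frag_unref(), used in the skb_shift() and skb_segment() hunks below, take a skb_frag_t * directly for call sites that already hold the frag pointer.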
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: "Michał Mirosław" <mirq-linux@rere.qmqm.pl>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e27334ec367a..296afd0aa8d2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -326,7 +326,7 @@ static void skb_release_data(struct sk_buff *skb)
 		if (skb_shinfo(skb)->nr_frags) {
 			int i;
 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-				put_page(skb_shinfo(skb)->frags[i].page);
+				skb_frag_unref(skb, i);
 		}
 
 		/*
@@ -809,7 +809,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
-			get_page(skb_shinfo(n)->frags[i].page);
+			skb_frag_ref(skb, i);
 		}
 		skb_shinfo(n)->nr_frags = i;
 	}
@@ -901,7 +901,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-			get_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_ref(skb, i);
 
 		if (skb_has_frag_list(skb))
 			skb_clone_fraglist(skb);
@@ -1181,7 +1181,7 @@ drop_pages:
 		skb_shinfo(skb)->nr_frags = i;
 
 		for (; i < nfrags; i++)
-			put_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_unref(skb, i);
 
 		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);
@@ -1350,7 +1350,7 @@ pull_pages:
 	k = 0;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		if (skb_shinfo(skb)->frags[i].size <= eat) {
-			put_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_unref(skb, i);
 			eat -= skb_shinfo(skb)->frags[i].size;
 		} else {
 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -1609,7 +1609,8 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
-		if (__splice_segment(f->page, f->page_offset, f->size,
+		if (__splice_segment(skb_frag_page(f),
+				     f->page_offset, f->size,
 				     offset, len, skb, spd, 0, sk, pipe))
 			return 1;
 	}
@@ -2154,7 +2155,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
 				 *    where splitting is expensive.
 				 * 2. Split is accurately. We make this.
 				 */
-				get_page(skb_shinfo(skb)->frags[i].page);
+				skb_frag_ref(skb, i);
 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
 				skb_shinfo(skb1)->frags[0].size -= len - pos;
 				skb_shinfo(skb)->frags[i].size	= len - pos;
@@ -2229,7 +2230,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 	 * commit all, so that we don't have to undo partial changes
 	 */
 	if (!to ||
-	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+			      fragfrom->page_offset)) {
 		merge = -1;
 	} else {
 		merge = to - 1;
@@ -2276,7 +2278,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 			to++;
 
 		} else {
-			get_page(fragfrom->page);
+			__skb_frag_ref(fragfrom);
 			fragto->page = fragfrom->page;
 			fragto->page_offset = fragfrom->page_offset;
 			fragto->size = todo;
@@ -2298,7 +2300,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragto = &skb_shinfo(tgt)->frags[merge];
 
 		fragto->size += fragfrom->size;
-		put_page(fragfrom->page);
+		__skb_frag_unref(fragfrom);
 	}
 
 	/* Reposition in the original skb */
@@ -2543,8 +2545,7 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 		left = PAGE_SIZE - frag->page_offset;
 		copy = (length > left)? left : length;
 
-		ret = getfrag(from, (page_address(frag->page) +
-			    frag->page_offset + frag->size),
+		ret = getfrag(from, skb_frag_address(frag) + frag->size,
 			    offset, copy, 0, skb);
 		if (ret < 0)
 			return -EFAULT;
@@ -2696,7 +2697,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 
 		while (pos < offset + len && i < nfrags) {
 			*frag = skb_shinfo(skb)->frags[i];
-			get_page(frag->page);
+			__skb_frag_ref(frag);
 			size = frag->size;
 
 			if (pos < offset) {
@@ -2919,7 +2920,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 
 			if (copy > len)
 				copy = len;
-			sg_set_page(&sg[elt], frag->page, copy,
+			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
 				    frag->page_offset+offset-start);
 			elt++;
 			if (!(len -= copy))
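
For completeness, a sketch of reading frag payload through the accessors rather than via page_address(frag->page), mirroring the skb_frag_page()/skb_frag_address() conversions above. The function below is hypothetical and not part of this commit; frag->size is still accessed directly, as the code at this point in the tree does.

#include <linux/skbuff.h>

/* Hypothetical example: walk the paged frags and sum their bytes.
 * skb_frag_address() assumes the frag page has a kernel mapping
 * (i.e. is not in highmem), just like the converted getfrag() call.
 */
static unsigned int example_sum_frag_bytes(struct sk_buff *skb)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		const unsigned char *vaddr = skb_frag_address(frag);
		unsigned int j;

		for (j = 0; j < frag->size; j++)
			sum += vaddr[j];
	}
	return sum;
}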