Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	195
1 file changed, 113 insertions, 82 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 27002dffe7ed..18a3cebb753d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -184,11 +184,21 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		goto out;
 	prefetchw(skb);
 
+	/* We do our best to align skb_shared_info on a separate cache
+	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+	 * Both skb->head and skb_shared_info are cache line aligned.
+	 */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask, node);
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = kmalloc_node_track_caller(size, gfp_mask, node);
 	if (!data)
 		goto nodata;
+	/* kmalloc(size) might give us more room than requested.
+	 * Put skb_shared_info exactly at the end of allocated zone,
+	 * to allow max possible filling before reallocation.
+	 */
+	size = SKB_WITH_OVERHEAD(ksize(data));
 	prefetchw(data + size);
 
 	/*
@@ -197,7 +207,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->truesize = size + sizeof(struct sk_buff);
+	/* Account for allocated memory : skb + skb->head */
+	skb->truesize = SKB_TRUESIZE(size);
 	atomic_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
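
The new allocation path leans on SKB_WITH_OVERHEAD() and SKB_TRUESIZE(), which are defined in include/linux/skbuff.h rather than in this file. As a rough sketch of what they compute (assumed forms, not part of this diff):

	/* Sketch only -- approximate forms of the helpers used above. */
	#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
	#define SKB_WITH_OVERHEAD(X)	\
		((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
	#define SKB_TRUESIZE(X)	((X) +						\
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

In other words, truesize now charges the socket for the skb metadata, the (possibly rounded-up) head area and the shared info block, rather than just size + sizeof(struct sk_buff).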
@@ -326,7 +337,7 @@ static void skb_release_data(struct sk_buff *skb)
 		if (skb_shinfo(skb)->nr_frags) {
 			int i;
 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-				put_page(skb_shinfo(skb)->frags[i].page);
+				skb_frag_unref(skb, i);
 		}
 
 		/*
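
skb_frag_unref() here, and skb_frag_ref() used further down, replace open-coded put_page()/get_page() on frag->page. A minimal sketch of their assumed shape, following the frag accessor layer in include/linux/skbuff.h:

	/* Sketch: frag reference helpers, roughly as introduced with the frag API. */
	static inline void __skb_frag_ref(skb_frag_t *frag)
	{
		get_page(skb_frag_page(frag));	/* skb_frag_page() returns frag's page */
	}

	static inline void skb_frag_ref(struct sk_buff *skb, int f)
	{
		__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
	}

	static inline void __skb_frag_unref(skb_frag_t *frag)
	{
		put_page(skb_frag_page(frag));
	}

	static inline void skb_frag_unref(struct sk_buff *skb, int f)
	{
		__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
	}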
@@ -475,6 +486,30 @@ void consume_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(consume_skb);
 
 /**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	skb_release_head_state(skb);
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
  * skb_recycle_check - check if skb can be reused for receive
  * @skb: buffer
  * @skb_size: minimum receive buffer size
@@ -488,33 +523,10 @@ EXPORT_SYMBOL(consume_skb);
  */
 bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
-	struct skb_shared_info *shinfo;
-
-	if (irqs_disabled())
-		return false;
-
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-		return false;
-
-	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return false;
-
-	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_pointer(skb) - skb->head < skb_size)
+	if (!skb_is_recycleable(skb, skb_size))
 		return false;
 
-	if (skb_shared(skb) || skb_cloned(skb))
-		return false;
-
-	skb_release_head_state(skb);
-
-	shinfo = skb_shinfo(skb);
-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-	atomic_set(&shinfo->dataref, 1);
-
-	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->data = skb->head + NET_SKB_PAD;
-	skb_reset_tail_pointer(skb);
+	skb_recycle(skb);
 
 	return true;
 }
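
The individual checks removed above are consolidated into skb_is_recycleable(), which is not shown in this diff. Reconstructed from the removed code, it presumably reads roughly as follows:

	/* Sketch of skb_is_recycleable(), reconstructed from the checks removed
	 * above; the real helper lives in include/linux/skbuff.h.
	 */
	static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
	{
		if (irqs_disabled())
			return false;

		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
			return false;

		if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
			return false;

		skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
		if (skb_end_pointer(skb) - skb->head < skb_size)
			return false;

		if (skb_shared(skb) || skb_cloned(skb))
			return false;

		return true;
	}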
@@ -529,6 +541,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->mac_header = old->mac_header;
 	skb_dst_copy(new, old);
 	new->rxhash = old->rxhash;
+	new->ooo_okay = old->ooo_okay;
+	new->l4_rxhash = old->l4_rxhash;
 #ifdef CONFIG_XFRM
 	new->sp = secpath_get(old->sp);
 #endif
@@ -611,8 +625,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
-/* skb frags copy userspace buffers to kernel */
-static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
+ * @skb: the skb to modify
+ * @gfp_mask: allocation priority
+ *
+ * This must be called on SKBTX_DEV_ZEROCOPY skb.
+ * It will copy all frags into kernel and drop the reference
+ * to userspace pages.
+ *
+ * If this function is called from an interrupt gfp_mask() must be
+ * %GFP_ATOMIC.
+ *
+ * Returns 0 on success or a negative error code on failure
+ * to allocate kernel memory to copy to.
+ */
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	int i;
 	int num_frags = skb_shinfo(skb)->nr_frags;
@@ -634,7 +661,7 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 		}
 		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
 		memcpy(page_address(page),
-		       vaddr + f->page_offset, f->size);
+		       vaddr + f->page_offset, skb_frag_size(f));
 		kunmap_skb_frag(vaddr);
 		page->private = (unsigned long)head;
 		head = page;
@@ -642,16 +669,18 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 
 	/* skb frags release userspace buffers */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-		put_page(skb_shinfo(skb)->frags[i].page);
+		skb_frag_unref(skb, i);
 
 	uarg->callback(uarg);
 
 	/* skb frags point to kernel buffers */
 	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
-		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
-		skb_shinfo(skb)->frags[i - 1].page = head;
+		__skb_fill_page_desc(skb, i-1, head, 0,
+				     skb_shinfo(skb)->frags[i - 1].size);
 		head = (struct page *)head->private;
 	}
+
+	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	return 0;
 }
 
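
skb_copy_ubufs() now fills each frag descriptor through __skb_fill_page_desc() instead of assigning page and page_offset separately, and clears SKBTX_DEV_ZEROCOPY itself so that callers no longer have to. A sketch of the helper's assumed definition from the accessor layer:

	/* Sketch: set page, offset and size of frag i in one place. */
	static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
						struct page *page, int off, int size)
	{
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		frag->page = page;
		frag->page_offset = off;
		skb_frag_size_set(frag, size);
	}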
@@ -677,7 +706,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 		if (skb_copy_ubufs(skb, gfp_mask))
 			return NULL;
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 
 	n = skb + 1;
@@ -803,11 +831,10 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 				n = NULL;
 				goto out;
 			}
-			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
-			get_page(skb_shinfo(n)->frags[i].page);
+			skb_frag_ref(skb, i);
 		}
 		skb_shinfo(n)->nr_frags = i;
 	}
@@ -896,10 +923,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 		if (skb_copy_ubufs(skb, gfp_mask))
 			goto nofrags;
-		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	}
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-		get_page(skb_shinfo(skb)->frags[i].page);
+		skb_frag_ref(skb, i);
 
 	if (skb_has_frag_list(skb))
 		skb_clone_fraglist(skb);
@@ -1166,20 +1192,20 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 			goto drop_pages;
 
 		for (; i < nfrags; i++) {
-			int end = offset + skb_shinfo(skb)->frags[i].size;
+			int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
 			if (end < len) {
 				offset = end;
 				continue;
 			}
 
-			skb_shinfo(skb)->frags[i++].size = len - offset;
+			skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
 
 drop_pages:
 			skb_shinfo(skb)->nr_frags = i;
 
 			for (; i < nfrags; i++)
-				put_page(skb_shinfo(skb)->frags[i].page);
+				skb_frag_unref(skb, i);
 
 			if (skb_has_frag_list(skb))
 				skb_drop_fraglist(skb);
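
Most of the remaining hunks are mechanical conversions to the skb_frag_size accessor family. Their assumed definitions (again from include/linux/skbuff.h, not shown in this diff) are thin wrappers around the frag size field:

	/* Sketch: the skb_frag_size accessor family. */
	static inline unsigned int skb_frag_size(const skb_frag_t *frag)
	{
		return frag->size;
	}

	static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
	{
		frag->size = size;
	}

	static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
	{
		frag->size += delta;
	}

	static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
	{
		frag->size -= delta;
	}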
@@ -1282,9 +1308,11 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	/* Estimate size of pulled pages. */
 	eat = delta;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		if (skb_shinfo(skb)->frags[i].size >= eat)
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+		if (size >= eat)
 			goto pull_pages;
-		eat -= skb_shinfo(skb)->frags[i].size;
+		eat -= size;
 	}
 
 	/* If we need update frag list, we are in troubles.
@@ -1347,14 +1375,16 @@ pull_pages:
 	eat = delta;
 	k = 0;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		if (skb_shinfo(skb)->frags[i].size <= eat) {
-			put_page(skb_shinfo(skb)->frags[i].page);
-			eat -= skb_shinfo(skb)->frags[i].size;
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+		if (size <= eat) {
+			skb_frag_unref(skb, i);
+			eat -= size;
 		} else {
 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
 			if (eat) {
 				skb_shinfo(skb)->frags[k].page_offset += eat;
-				skb_shinfo(skb)->frags[k].size -= eat;
+				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
 				eat = 0;
 			}
 			k++;
@@ -1409,7 +1439,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1607,7 +1637,8 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
-		if (__splice_segment(f->page, f->page_offset, f->size,
+		if (__splice_segment(skb_frag_page(f),
+				     f->page_offset, skb_frag_size(f),
 				     offset, len, skb, spd, 0, sk, pipe))
 			return 1;
 	}
@@ -1717,7 +1748,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 
 		WARN_ON(start > offset + len);
 
-		end = start + frag->size;
+		end = start + skb_frag_size(frag);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1790,7 +1821,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1865,7 +1896,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -2138,7 +2169,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
 	skb->data_len = len - pos;
 
 	for (i = 0; i < nfrags; i++) {
-		int size = skb_shinfo(skb)->frags[i].size;
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
 		if (pos + size > len) {
 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -2152,10 +2183,10 @@ static inline void skb_split_no_header(struct sk_buff *skb,
 			 *    where splitting is expensive.
 			 * 2. Split is accurately. We make this.
 			 */
-			get_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_ref(skb, i);
 			skb_shinfo(skb1)->frags[0].page_offset += len - pos;
-			skb_shinfo(skb1)->frags[0].size -= len - pos;
-			skb_shinfo(skb)->frags[i].size = len - pos;
+			skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
+			skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
 			skb_shinfo(skb)->nr_frags++;
 		}
 		k++;
@@ -2227,12 +2258,13 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 	 * commit all, so that we don't have to undo partial changes
 	 */
 	if (!to ||
-	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+			      fragfrom->page_offset)) {
 		merge = -1;
 	} else {
 		merge = to - 1;
 
-		todo -= fragfrom->size;
+		todo -= skb_frag_size(fragfrom);
 		if (todo < 0) {
 			if (skb_prepare_for_shift(skb) ||
 			    skb_prepare_for_shift(tgt))
@@ -2242,8 +2274,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragfrom = &skb_shinfo(skb)->frags[from];
 		fragto = &skb_shinfo(tgt)->frags[merge];
 
-		fragto->size += shiftlen;
-		fragfrom->size -= shiftlen;
+		skb_frag_size_add(fragto, shiftlen);
+		skb_frag_size_sub(fragfrom, shiftlen);
 		fragfrom->page_offset += shiftlen;
 
 		goto onlymerged;
@@ -2267,20 +2299,20 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragfrom = &skb_shinfo(skb)->frags[from];
 		fragto = &skb_shinfo(tgt)->frags[to];
 
-		if (todo >= fragfrom->size) {
+		if (todo >= skb_frag_size(fragfrom)) {
 			*fragto = *fragfrom;
-			todo -= fragfrom->size;
+			todo -= skb_frag_size(fragfrom);
 			from++;
 			to++;
 
 		} else {
-			get_page(fragfrom->page);
+			__skb_frag_ref(fragfrom);
 			fragto->page = fragfrom->page;
 			fragto->page_offset = fragfrom->page_offset;
-			fragto->size = todo;
+			skb_frag_size_set(fragto, todo);
 
 			fragfrom->page_offset += todo;
-			fragfrom->size -= todo;
+			skb_frag_size_sub(fragfrom, todo);
 			todo = 0;
 
 			to++;
@@ -2295,8 +2327,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragfrom = &skb_shinfo(skb)->frags[0];
 		fragto = &skb_shinfo(tgt)->frags[merge];
 
-		fragto->size += fragfrom->size;
-		put_page(fragfrom->page);
+		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
+		__skb_frag_unref(fragfrom);
 	}
 
 	/* Reposition in the original skb */
@@ -2393,7 +2425,7 @@ next_skb:
 
 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
-		block_limit = frag->size + st->stepped_offset;
+		block_limit = skb_frag_size(frag) + st->stepped_offset;
 
 		if (abs_offset < block_limit) {
 			if (!st->frag_data)
@@ -2411,7 +2443,7 @@ next_skb:
 		}
 
 		st->frag_idx++;
-		st->stepped_offset += frag->size;
+		st->stepped_offset += skb_frag_size(frag);
 	}
 
 	if (st->frag_data) {
@@ -2541,14 +2573,13 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 		left = PAGE_SIZE - frag->page_offset;
 		copy = (length > left)? left : length;
 
-		ret = getfrag(from, (page_address(frag->page) +
-			    frag->page_offset + frag->size),
+		ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
 			    offset, copy, 0, skb);
 		if (ret < 0)
 			return -EFAULT;
 
 		/* copy was successful so update the size parameters */
-		frag->size += copy;
+		skb_frag_size_add(frag, copy);
 		skb->len += copy;
 		skb->data_len += copy;
 		offset += copy;
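
skb_frag_address() folds the page_address() plus page_offset arithmetic into one helper. Its assumed form, valid when the frag page sits in lowmem:

	/* Sketch: virtual address of the start of a frag's data. */
	static inline void *skb_frag_address(const skb_frag_t *frag)
	{
		return page_address(skb_frag_page(frag)) + frag->page_offset;
	}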
@@ -2694,12 +2725,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 
 		while (pos < offset + len && i < nfrags) {
 			*frag = skb_shinfo(skb)->frags[i];
-			get_page(frag->page);
-			size = frag->size;
+			__skb_frag_ref(frag);
+			size = skb_frag_size(frag);
 
 			if (pos < offset) {
 				frag->page_offset += offset - pos;
-				frag->size -= offset - pos;
+				skb_frag_size_sub(frag, offset - pos);
 			}
 
 			skb_shinfo(nskb)->nr_frags++;
@@ -2708,7 +2739,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 				i++;
 				pos += size;
 			} else {
-				frag->size -= pos + size - (offset + len);
+				skb_frag_size_sub(frag, pos + size - (offset + len));
 				goto skip_fraglist;
 			}
 
@@ -2788,7 +2819,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		} while (--i);
 
 		frag->page_offset += offset;
-		frag->size -= offset;
+		skb_frag_size_sub(frag, offset);
 
 		skb->truesize -= skb->data_len;
 		skb->len -= skb->data_len;
@@ -2840,7 +2871,7 @@ merge:
 		unsigned int eat = offset - headlen;
 
 		skbinfo->frags[0].page_offset += eat;
-		skbinfo->frags[0].size -= eat;
+		skb_frag_size_sub(&skbinfo->frags[0], eat);
 		skb->data_len -= eat;
 		skb->len -= eat;
 		offset = headlen;
@@ -2911,13 +2942,13 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
-			sg_set_page(&sg[elt], frag->page, copy,
+			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
 				    frag->page_offset+offset-start);
 			elt++;
 			if (!(len -= copy))
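
Taken together, code that used to dereference frag->page and frag->size directly is now expected to go through the accessors. A hypothetical caller-side sketch (skb_frags_total_size() is illustrative only, not a helper added by this patch):

	/* Hypothetical example: walking frags through the accessor API. */
	static unsigned int skb_frags_total_size(const struct sk_buff *skb)
	{
		unsigned int i, total = 0;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			total += skb_frag_size(&skb_shinfo(skb)->frags[i]);
		return total;
	}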