Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	173
1 file changed, 96 insertions(+), 77 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 387703f56fce..18a3cebb753d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -184,11 +184,21 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		goto out;
 	prefetchw(skb);
 
+	/* We do our best to align skb_shared_info on a separate cache
+	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+	 * Both skb->head and skb_shared_info are cache line aligned.
+	 */
 	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
-					 gfp_mask, node);
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = kmalloc_node_track_caller(size, gfp_mask, node);
 	if (!data)
 		goto nodata;
+	/* kmalloc(size) might give us more room than requested.
+	 * Put skb_shared_info exactly at the end of allocated zone,
+	 * to allow max possible filling before reallocation.
+	 */
+	size = SKB_WITH_OVERHEAD(ksize(data));
 	prefetchw(data + size);
 
 	/*
@@ -197,7 +207,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->truesize = size + sizeof(struct sk_buff);
+	/* Account for allocated memory : skb + skb->head */
+	skb->truesize = SKB_TRUESIZE(size);
 	atomic_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
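
The two hunks above change how __alloc_skb() sizes the allocation and accounts truesize. A minimal sketch of the arithmetic involved, assuming the skbuff.h macro definitions of this era (quoted from memory, approximate rather than verbatim):

	/* Approximate macro bodies from include/linux/skbuff.h (sketch only). */
	#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
					 ~(SMP_CACHE_BYTES - 1))

	/* usable data bytes once skb_shared_info is carved off the end */
	#define SKB_WITH_OVERHEAD(X)	\
		((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

	/* memory charged to the owning socket for one skb of X data bytes */
	#define SKB_TRUESIZE(X)	((X) +						\
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

The idea is that kmalloc() may round the request up to the next slab size; recomputing size as SKB_WITH_OVERHEAD(ksize(data)) turns that slack into usable tailroom, keeps skb_shared_info at the very end of the allocation, and lets truesize charge the socket for what was actually allocated.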
@@ -326,7 +337,7 @@ static void skb_release_data(struct sk_buff *skb)
 		if (skb_shinfo(skb)->nr_frags) {
 			int i;
 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-				put_page(skb_shinfo(skb)->frags[i].page);
+				skb_frag_unref(skb, i);
 		}
 
 	/*
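
Most of the mechanical changes from here on replace open-coded frags[i].page and frags[i].size accesses with accessor helpers. Roughly, these helpers (a sketch of the include/linux/skbuff.h additions, not verbatim) wrap the same operations:

	/* Sketch of the frag accessors used throughout this diff. */
	static inline struct page *skb_frag_page(const skb_frag_t *frag)
	{
		return frag->page;		/* the frag's backing page */
	}

	static inline unsigned int skb_frag_size(const skb_frag_t *frag)
	{
		return frag->size;		/* bytes used within that page */
	}

	static inline void __skb_frag_ref(skb_frag_t *frag)
	{
		get_page(skb_frag_page(frag));
	}

	static inline void __skb_frag_unref(skb_frag_t *frag)
	{
		put_page(skb_frag_page(frag));
	}

	static inline void skb_frag_ref(struct sk_buff *skb, int f)
	{
		__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
	}

	static inline void skb_frag_unref(struct sk_buff *skb, int f)
	{
		__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
	}

Going through skb_frag_size() and friends instead of touching frag->size directly is what later allows the frag layout to change without revisiting every call site.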
@@ -475,6 +486,30 @@ void consume_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(consume_skb);
 
 /**
+ *	skb_recycle - clean up an skb for reuse
+ *	@skb: buffer
+ *
+ *	Recycles the skb to be reused as a receive buffer. This
+ *	function does any necessary reference count dropping, and
+ *	cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	skb_release_head_state(skb);
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
  *	skb_recycle_check - check if skb can be reused for receive
  *	@skb: buffer
  *	@skb_size: minimum receive buffer size
@@ -488,33 +523,10 @@ EXPORT_SYMBOL(consume_skb);
  */
 bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
-	struct skb_shared_info *shinfo;
-
-	if (irqs_disabled())
-		return false;
-
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-		return false;
-
-	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+	if (!skb_is_recycleable(skb, skb_size))
 		return false;
 
-	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return false;
-
-	if (skb_shared(skb) || skb_cloned(skb))
-		return false;
-
-	skb_release_head_state(skb);
-
-	shinfo = skb_shinfo(skb);
-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-	atomic_set(&shinfo->dataref, 1);
-
-	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->data = skb->head + NET_SKB_PAD;
-	skb_reset_tail_pointer(skb);
+	skb_recycle(skb);
 
 	return true;
 }
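
With skb_recycle() split out above and the eligibility checks folded into skb_is_recycleable(), a driver can reuse a transmitted skb as a receive buffer. A hedged usage sketch; example_priv, rx_buf_size and example_refill_rx_ring are hypothetical names, not part of this patch:

	/* Hypothetical driver context, for illustration of the API only. */
	struct example_priv {
		unsigned int rx_buf_size;	/* RX buffer size this driver needs */
	};

	static void example_refill_rx_ring(struct example_priv *priv,
					   struct sk_buff *skb);

	static void example_tx_complete(struct example_priv *priv,
					struct sk_buff *skb)
	{
		if (skb_recycle_check(skb, priv->rx_buf_size)) {
			/* skb now looks freshly allocated; give it back to RX */
			example_refill_rx_ring(priv, skb);
		} else {
			dev_kfree_skb_any(skb);
		}
	}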
@@ -529,6 +541,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->mac_header = old->mac_header;
 	skb_dst_copy(new, old);
 	new->rxhash = old->rxhash;
+	new->ooo_okay = old->ooo_okay;
+	new->l4_rxhash = old->l4_rxhash;
 #ifdef CONFIG_XFRM
 	new->sp = secpath_get(old->sp);
 #endif
@@ -647,7 +661,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 		}
 		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
 		memcpy(page_address(page),
-		       vaddr + f->page_offset, f->size);
+		       vaddr + f->page_offset, skb_frag_size(f));
 		kunmap_skb_frag(vaddr);
 		page->private = (unsigned long)head;
 		head = page;
@@ -655,14 +669,14 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 
 	/* skb frags release userspace buffers */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-		put_page(skb_shinfo(skb)->frags[i].page);
+		skb_frag_unref(skb, i);
 
 	uarg->callback(uarg);
 
 	/* skb frags point to kernel buffers */
 	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
-		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
-		skb_shinfo(skb)->frags[i - 1].page = head;
+		__skb_fill_page_desc(skb, i-1, head, 0,
+				     skb_shinfo(skb)->frags[i - 1].size);
 		head = (struct page *)head->private;
 	}
 
@@ -820,7 +834,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
-			get_page(skb_shinfo(n)->frags[i].page);
+			skb_frag_ref(skb, i);
 		}
 		skb_shinfo(n)->nr_frags = i;
 	}
@@ -911,7 +925,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 			goto nofrags;
 	}
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-		get_page(skb_shinfo(skb)->frags[i].page);
+		skb_frag_ref(skb, i);
 
 	if (skb_has_frag_list(skb))
 		skb_clone_fraglist(skb);
@@ -1178,20 +1192,20 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 			goto drop_pages;
 
 		for (; i < nfrags; i++) {
-			int end = offset + skb_shinfo(skb)->frags[i].size;
+			int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
 			if (end < len) {
 				offset = end;
 				continue;
 			}
 
-			skb_shinfo(skb)->frags[i++].size = len - offset;
+			skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
 
 drop_pages:
 		skb_shinfo(skb)->nr_frags = i;
 
 		for (; i < nfrags; i++)
-			put_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_unref(skb, i);
 
 		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);
@@ -1294,9 +1308,11 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	/* Estimate size of pulled pages. */
 	eat = delta;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		if (skb_shinfo(skb)->frags[i].size >= eat)
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+		if (size >= eat)
 			goto pull_pages;
-		eat -= skb_shinfo(skb)->frags[i].size;
+		eat -= size;
 	}
 
 	/* If we need update frag list, we are in troubles.
@@ -1359,14 +1375,16 @@ pull_pages:
 	eat = delta;
 	k = 0;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		if (skb_shinfo(skb)->frags[i].size <= eat) {
-			put_page(skb_shinfo(skb)->frags[i].page);
-			eat -= skb_shinfo(skb)->frags[i].size;
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+		if (size <= eat) {
+			skb_frag_unref(skb, i);
+			eat -= size;
 		} else {
 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
 			if (eat) {
 				skb_shinfo(skb)->frags[k].page_offset += eat;
-				skb_shinfo(skb)->frags[k].size -= eat;
+				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
 				eat = 0;
 			}
 			k++;
@@ -1421,7 +1439,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1619,7 +1637,8 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
-		if (__splice_segment(f->page, f->page_offset, f->size,
+		if (__splice_segment(skb_frag_page(f),
+				     f->page_offset, skb_frag_size(f),
 				     offset, len, skb, spd, 0, sk, pipe))
 			return 1;
 	}
@@ -1729,7 +1748,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 
 		WARN_ON(start > offset + len);
 
-		end = start + frag->size;
+		end = start + skb_frag_size(frag);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1802,7 +1821,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1877,7 +1896,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -2150,7 +2169,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
 	skb->data_len = len - pos;
 
 	for (i = 0; i < nfrags; i++) {
-		int size = skb_shinfo(skb)->frags[i].size;
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
 		if (pos + size > len) {
 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -2164,10 +2183,10 @@ static inline void skb_split_no_header(struct sk_buff *skb,
 			 *    where splitting is expensive.
 			 * 2. Split is accurately. We make this.
 			 */
-			get_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_ref(skb, i);
 			skb_shinfo(skb1)->frags[0].page_offset += len - pos;
-			skb_shinfo(skb1)->frags[0].size -= len - pos;
-			skb_shinfo(skb)->frags[i].size = len - pos;
+			skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
+			skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
 			skb_shinfo(skb)->nr_frags++;
 		}
 		k++;
@@ -2239,12 +2258,13 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 	 * commit all, so that we don't have to undo partial changes
 	 */
 	if (!to ||
-	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+			      fragfrom->page_offset)) {
 		merge = -1;
 	} else {
 		merge = to - 1;
 
-		todo -= fragfrom->size;
+		todo -= skb_frag_size(fragfrom);
 		if (todo < 0) {
 			if (skb_prepare_for_shift(skb) ||
 			    skb_prepare_for_shift(tgt))
@@ -2254,8 +2274,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragfrom = &skb_shinfo(skb)->frags[from];
 		fragto = &skb_shinfo(tgt)->frags[merge];
 
-		fragto->size += shiftlen;
-		fragfrom->size -= shiftlen;
+		skb_frag_size_add(fragto, shiftlen);
+		skb_frag_size_sub(fragfrom, shiftlen);
 		fragfrom->page_offset += shiftlen;
 
 		goto onlymerged;
@@ -2279,20 +2299,20 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragfrom = &skb_shinfo(skb)->frags[from];
 		fragto = &skb_shinfo(tgt)->frags[to];
 
-		if (todo >= fragfrom->size) {
+		if (todo >= skb_frag_size(fragfrom)) {
 			*fragto = *fragfrom;
-			todo -= fragfrom->size;
+			todo -= skb_frag_size(fragfrom);
 			from++;
 			to++;
 
 		} else {
-			get_page(fragfrom->page);
+			__skb_frag_ref(fragfrom);
 			fragto->page = fragfrom->page;
 			fragto->page_offset = fragfrom->page_offset;
-			fragto->size = todo;
+			skb_frag_size_set(fragto, todo);
 
 			fragfrom->page_offset += todo;
-			fragfrom->size -= todo;
+			skb_frag_size_sub(fragfrom, todo);
 			todo = 0;
 
 			to++;
@@ -2307,8 +2327,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragfrom = &skb_shinfo(skb)->frags[0];
 		fragto = &skb_shinfo(tgt)->frags[merge];
 
-		fragto->size += fragfrom->size;
-		put_page(fragfrom->page);
+		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
+		__skb_frag_unref(fragfrom);
 	}
 
 	/* Reposition in the original skb */
@@ -2405,7 +2425,7 @@ next_skb:
 
 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
-		block_limit = frag->size + st->stepped_offset;
+		block_limit = skb_frag_size(frag) + st->stepped_offset;
 
 		if (abs_offset < block_limit) {
 			if (!st->frag_data)
@@ -2423,7 +2443,7 @@ next_skb:
 		}
 
 		st->frag_idx++;
-		st->stepped_offset += frag->size;
+		st->stepped_offset += skb_frag_size(frag);
 	}
 
 	if (st->frag_data) {
@@ -2553,14 +2573,13 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 		left = PAGE_SIZE - frag->page_offset;
 		copy = (length > left)? left : length;
 
-		ret = getfrag(from, (page_address(frag->page) +
-			    frag->page_offset + frag->size),
+		ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
 			    offset, copy, 0, skb);
 		if (ret < 0)
 			return -EFAULT;
 
 		/* copy was successful so update the size parameters */
-		frag->size += copy;
+		skb_frag_size_add(frag, copy);
 		skb->len += copy;
 		skb->data_len += copy;
 		offset += copy;
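
The getfrag() destination above is now computed with skb_frag_address() plus skb_frag_size(). skb_frag_address() is, roughly (a sketch, not the verbatim definition):

	/* Sketch: resolve a frag to a kernel virtual address. */
	static inline void *skb_frag_address(const skb_frag_t *frag)
	{
		return page_address(skb_frag_page(frag)) + frag->page_offset;
	}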
@@ -2706,12 +2725,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 
 		while (pos < offset + len && i < nfrags) {
 			*frag = skb_shinfo(skb)->frags[i];
-			get_page(frag->page);
-			size = frag->size;
+			__skb_frag_ref(frag);
+			size = skb_frag_size(frag);
 
 			if (pos < offset) {
 				frag->page_offset += offset - pos;
-				frag->size -= offset - pos;
+				skb_frag_size_sub(frag, offset - pos);
 			}
 
 			skb_shinfo(nskb)->nr_frags++;
@@ -2720,7 +2739,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 				i++;
 				pos += size;
 			} else {
-				frag->size -= pos + size - (offset + len);
+				skb_frag_size_sub(frag, pos + size - (offset + len));
 				goto skip_fraglist;
 			}
 
@@ -2800,7 +2819,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		} while (--i);
 
 		frag->page_offset += offset;
-		frag->size -= offset;
+		skb_frag_size_sub(frag, offset);
 
 		skb->truesize -= skb->data_len;
 		skb->len -= skb->data_len;
@@ -2852,7 +2871,7 @@ merge:
 		unsigned int eat = offset - headlen;
 
 		skbinfo->frags[0].page_offset += eat;
-		skbinfo->frags[0].size -= eat;
+		skb_frag_size_sub(&skbinfo->frags[0], eat);
 		skb->data_len -= eat;
 		skb->len -= eat;
 		offset = headlen;
@@ -2923,13 +2942,13 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
-			sg_set_page(&sg[elt], frag->page, copy,
+			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
 				    frag->page_offset+offset-start);
 			elt++;
 			if (!(len -= copy))