Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	174
1 file changed, 96 insertions, 78 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 387703f56fce..ca4db40e75b8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -184,11 +184,20 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		goto out;
 	prefetchw(skb);
 
-	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask, node);
+	/* We do our best to align skb_shared_info on a separate cache
+	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+	 * Both skb->head and skb_shared_info are cache line aligned.
+	 */
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = kmalloc_node_track_caller(size, gfp_mask, node);
 	if (!data)
 		goto nodata;
+	/* kmalloc(size) might give us more room than requested.
+	 * Put skb_shared_info exactly at the end of allocated zone,
+	 * to allow max possible filling before reallocation.
+	 */
+	size = SKB_WITH_OVERHEAD(ksize(data));
 	prefetchw(data + size);
 
 	/*
@@ -197,7 +206,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->truesize = size + sizeof(struct sk_buff);
+	/* Account for allocated memory : skb + skb->head */
+	skb->truesize = SKB_TRUESIZE(size);
 	atomic_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
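Note: the net effect of the two hunks above is that the caller's size request is padded with an aligned skb_shared_info, the kmalloc slack reported by ksize() is handed back to the data area, and truesize now charges for everything actually allocated. A rough user-space sketch of that arithmetic follows; the macro bodies mirror SKB_DATA_ALIGN / SKB_WITH_OVERHEAD / SKB_TRUESIZE, but the cache-line size, the struct sizes and the fake_ksize() rounding are illustrative assumptions, not kernel values.

#include <stdio.h>

/* Illustrative stand-ins: real values come from the running kernel. */
#define SMP_CACHE_BYTES      64
#define SHARED_INFO_SIZE     320   /* pretend sizeof(struct skb_shared_info) */
#define SK_BUFF_SIZE         240   /* pretend sizeof(struct sk_buff) */

#define SKB_DATA_ALIGN(x)    (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(x) ((x) - SKB_DATA_ALIGN(SHARED_INFO_SIZE))
#define SKB_TRUESIZE(x)      ((x) + SKB_DATA_ALIGN(SK_BUFF_SIZE) + \
                              SKB_DATA_ALIGN(SHARED_INFO_SIZE))

/* Stand-in for ksize(): pretend kmalloc rounds requests up to a power of two. */
static unsigned int fake_ksize(unsigned int req)
{
	unsigned int sz = 32;

	while (sz < req)
		sz <<= 1;
	return sz;
}

int main(void)
{
	unsigned int size = 1500;                    /* caller's request */

	size += SKB_DATA_ALIGN(SHARED_INFO_SIZE);    /* room for skb_shared_info */
	size = SKB_WITH_OVERHEAD(fake_ksize(size));  /* hand kmalloc slack to the data area */

	printf("usable head size: %u, truesize charged: %u\n",
	       size, SKB_TRUESIZE(size));
	return 0;
}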
@@ -326,7 +336,7 @@ static void skb_release_data(struct sk_buff *skb)
 		if (skb_shinfo(skb)->nr_frags) {
 			int i;
 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-				put_page(skb_shinfo(skb)->frags[i].page);
+				skb_frag_unref(skb, i);
 		}
 
 		/*
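Note: skb_frag_unref() here, and skb_frag_ref() further down, are the new reference-counting helpers this series routes all frag page references through. They are thin wrappers, roughly as sketched below (paraphrased from the include/linux/skbuff.h helpers added by the same series, not copied verbatim):

static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));	/* take a reference on the frag's page */
}

static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));	/* drop that reference again */
}

static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

Funnelling every get_page()/put_page() on frag pages through one place is what lets the frag page representation change later without touching call sites like the one above.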
@@ -475,6 +485,30 @@ void consume_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(consume_skb);
 
 /**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	skb_release_head_state(skb);
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
  * skb_recycle_check - check if skb can be reused for receive
  * @skb: buffer
  * @skb_size: minimum receive buffer size
@@ -488,33 +522,10 @@ EXPORT_SYMBOL(consume_skb);
  */
 bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
-	struct skb_shared_info *shinfo;
-
-	if (irqs_disabled())
-		return false;
-
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-		return false;
-
-	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+	if (!skb_is_recycleable(skb, skb_size))
 		return false;
 
-	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return false;
-
-	if (skb_shared(skb) || skb_cloned(skb))
-		return false;
-
-	skb_release_head_state(skb);
-
-	shinfo = skb_shinfo(skb);
-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-	atomic_set(&shinfo->dataref, 1);
-
-	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->data = skb->head + NET_SKB_PAD;
-	skb_reset_tail_pointer(skb);
+	skb_recycle(skb);
 
 	return true;
 }
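Note: the two hunks above split the reset work out of skb_recycle_check() into a standalone skb_recycle(), with the eligibility test moved into skb_is_recycleable(). A hypothetical driver receive-refill path might use it as sketched below; mydrv_*, RX_BUF_SIZE and the ring structure are made-up names for illustration, not part of this patch.

/* Hypothetical driver fragment. */
static void mydrv_reuse_or_free(struct mydrv_ring *ring, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, RX_BUF_SIZE)) {
		/* skb now looks freshly allocated: data == head + NET_SKB_PAD,
		 * shared info zeroed and dataref reset to 1.
		 */
		mydrv_post_rx_buffer(ring, skb);
	} else {
		dev_kfree_skb_any(skb);
	}
}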
@@ -529,6 +540,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->mac_header		= old->mac_header;
 	skb_dst_copy(new, old);
 	new->rxhash		= old->rxhash;
+	new->ooo_okay		= old->ooo_okay;
+	new->l4_rxhash		= old->l4_rxhash;
 #ifdef CONFIG_XFRM
 	new->sp			= secpath_get(old->sp);
 #endif
@@ -647,7 +660,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 		}
 		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
 		memcpy(page_address(page),
-		       vaddr + f->page_offset, f->size);
+		       vaddr + f->page_offset, skb_frag_size(f));
 		kunmap_skb_frag(vaddr);
 		page->private = (unsigned long)head;
 		head = page;
@@ -655,14 +668,14 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 
 	/* skb frags release userspace buffers */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-		put_page(skb_shinfo(skb)->frags[i].page);
+		skb_frag_unref(skb, i);
 
 	uarg->callback(uarg);
 
 	/* skb frags point to kernel buffers */
 	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
-		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
-		skb_shinfo(skb)->frags[i - 1].page = head;
+		__skb_fill_page_desc(skb, i-1, head, 0,
+				     skb_shinfo(skb)->frags[i - 1].size);
 		head = (struct page *)head->private;
 	}
 
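Note: __skb_fill_page_desc() used above replaces the open-coded page/page_offset assignments and fills one frag slot in a single call, roughly as sketched below (paraphrased, not copied verbatim from include/linux/skbuff.h):

static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page        = page;	/* backing page */
	frag->page_offset = off;	/* data offset within that page */
	skb_frag_size_set(frag, size);
}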
@@ -820,7 +833,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
-			get_page(skb_shinfo(n)->frags[i].page);
+			skb_frag_ref(skb, i);
 		}
 		skb_shinfo(n)->nr_frags = i;
 	}
@@ -911,7 +924,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 			goto nofrags;
 	}
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-		get_page(skb_shinfo(skb)->frags[i].page);
+		skb_frag_ref(skb, i);
 
 	if (skb_has_frag_list(skb))
 		skb_clone_fraglist(skb);
@@ -1178,20 +1191,20 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 		goto drop_pages;
 
 	for (; i < nfrags; i++) {
-		int end = offset + skb_shinfo(skb)->frags[i].size;
+		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
 		if (end < len) {
 			offset = end;
 			continue;
 		}
 
-		skb_shinfo(skb)->frags[i++].size = len - offset;
+		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
 
 drop_pages:
 		skb_shinfo(skb)->nr_frags = i;
 
 		for (; i < nfrags; i++)
-			put_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_unref(skb, i);
 
 		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);
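Note: most of the remaining churn in this file is the same mechanical substitution, where direct frag->size arithmetic becomes a call into the new size accessors. At this point those are trivial wrappers, approximately:

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

Indirecting every size access through these helpers means the skb_frag_t layout can change later without another tree-wide sweep.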
@@ -1294,9 +1307,11 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	/* Estimate size of pulled pages. */
 	eat = delta;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		if (skb_shinfo(skb)->frags[i].size >= eat)
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+		if (size >= eat)
 			goto pull_pages;
-		eat -= skb_shinfo(skb)->frags[i].size;
+		eat -= size;
 	}
 
 	/* If we need update frag list, we are in troubles.
@@ -1359,14 +1374,16 @@ pull_pages:
 	eat = delta;
 	k = 0;
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		if (skb_shinfo(skb)->frags[i].size <= eat) {
-			put_page(skb_shinfo(skb)->frags[i].page);
-			eat -= skb_shinfo(skb)->frags[i].size;
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+		if (size <= eat) {
+			skb_frag_unref(skb, i);
+			eat -= size;
 		} else {
 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
 			if (eat) {
 				skb_shinfo(skb)->frags[k].page_offset += eat;
-				skb_shinfo(skb)->frags[k].size -= eat;
+				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
 				eat = 0;
 			}
 			k++;
@@ -1421,7 +1438,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1619,7 +1636,8 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
-		if (__splice_segment(f->page, f->page_offset, f->size,
+		if (__splice_segment(skb_frag_page(f),
+				     f->page_offset, skb_frag_size(f),
 				     offset, len, skb, spd, 0, sk, pipe))
 			return 1;
 	}
@@ -1729,7 +1747,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 
 		WARN_ON(start > offset + len);
 
-		end = start + frag->size;
+		end = start + skb_frag_size(frag);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1802,7 +1820,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1877,7 +1895,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -2150,7 +2168,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
 	skb->data_len = len - pos;
 
 	for (i = 0; i < nfrags; i++) {
-		int size = skb_shinfo(skb)->frags[i].size;
+		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
 		if (pos + size > len) {
 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -2164,10 +2182,10 @@ static inline void skb_split_no_header(struct sk_buff *skb,
 			 *    where splitting is expensive.
 			 * 2. Split is accurately. We make this.
 			 */
-			get_page(skb_shinfo(skb)->frags[i].page);
+			skb_frag_ref(skb, i);
 			skb_shinfo(skb1)->frags[0].page_offset += len - pos;
-			skb_shinfo(skb1)->frags[0].size -= len - pos;
-			skb_shinfo(skb)->frags[i].size = len - pos;
+			skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
+			skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
 			skb_shinfo(skb)->nr_frags++;
 		}
 		k++;
@@ -2239,12 +2257,13 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 	 * commit all, so that we don't have to undo partial changes
 	 */
 	if (!to ||
-	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+			      fragfrom->page_offset)) {
 		merge = -1;
 	} else {
 		merge = to - 1;
 
-		todo -= fragfrom->size;
+		todo -= skb_frag_size(fragfrom);
 		if (todo < 0) {
 			if (skb_prepare_for_shift(skb) ||
 			    skb_prepare_for_shift(tgt))
@@ -2254,8 +2273,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragfrom = &skb_shinfo(skb)->frags[from];
 		fragto = &skb_shinfo(tgt)->frags[merge];
 
-		fragto->size += shiftlen;
-		fragfrom->size -= shiftlen;
+		skb_frag_size_add(fragto, shiftlen);
+		skb_frag_size_sub(fragfrom, shiftlen);
 		fragfrom->page_offset += shiftlen;
 
 		goto onlymerged;
@@ -2279,20 +2298,20 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragfrom = &skb_shinfo(skb)->frags[from];
 		fragto = &skb_shinfo(tgt)->frags[to];
 
-		if (todo >= fragfrom->size) {
+		if (todo >= skb_frag_size(fragfrom)) {
 			*fragto = *fragfrom;
-			todo -= fragfrom->size;
+			todo -= skb_frag_size(fragfrom);
 			from++;
 			to++;
 
 		} else {
-			get_page(fragfrom->page);
+			__skb_frag_ref(fragfrom);
 			fragto->page = fragfrom->page;
 			fragto->page_offset = fragfrom->page_offset;
-			fragto->size = todo;
+			skb_frag_size_set(fragto, todo);
 
 			fragfrom->page_offset += todo;
-			fragfrom->size -= todo;
+			skb_frag_size_sub(fragfrom, todo);
 			todo = 0;
 
 			to++;
@@ -2307,8 +2326,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 		fragfrom = &skb_shinfo(skb)->frags[0];
 		fragto = &skb_shinfo(tgt)->frags[merge];
 
-		fragto->size += fragfrom->size;
-		put_page(fragfrom->page);
+		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
+		__skb_frag_unref(fragfrom);
 	}
 
 	/* Reposition in the original skb */
@@ -2405,7 +2424,7 @@ next_skb:
 
 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
-		block_limit = frag->size + st->stepped_offset;
+		block_limit = skb_frag_size(frag) + st->stepped_offset;
 
 		if (abs_offset < block_limit) {
 			if (!st->frag_data)
@@ -2423,7 +2442,7 @@ next_skb:
 		}
 
 		st->frag_idx++;
-		st->stepped_offset += frag->size;
+		st->stepped_offset += skb_frag_size(frag);
 	}
 
 	if (st->frag_data) {
@@ -2553,14 +2572,13 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 		left = PAGE_SIZE - frag->page_offset;
 		copy = (length > left)? left : length;
 
-		ret = getfrag(from, (page_address(frag->page) +
-			    frag->page_offset + frag->size),
+		ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
 			    offset, copy, 0, skb);
 		if (ret < 0)
 			return -EFAULT;
 
 		/* copy was successful so update the size parameters */
-		frag->size += copy;
+		skb_frag_size_add(frag, copy);
 		skb->len += copy;
 		skb->data_len += copy;
 		offset += copy;
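Note: skb_frag_address() in the getfrag() call above collapses the old page_address() + page_offset computation into one helper, roughly (paraphrased, not copied verbatim):

static inline void *skb_frag_address(const skb_frag_t *frag)
{
	/* linear address of the frag's data (valid for lowmem pages) */
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}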
@@ -2706,12 +2724,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 
 		while (pos < offset + len && i < nfrags) {
 			*frag = skb_shinfo(skb)->frags[i];
-			get_page(frag->page);
-			size = frag->size;
+			__skb_frag_ref(frag);
+			size = skb_frag_size(frag);
 
 			if (pos < offset) {
 				frag->page_offset += offset - pos;
-				frag->size -= offset - pos;
+				skb_frag_size_sub(frag, offset - pos);
 			}
 
 			skb_shinfo(nskb)->nr_frags++;
@@ -2720,7 +2738,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 				i++;
 				pos += size;
 			} else {
-				frag->size -= pos + size - (offset + len);
+				skb_frag_size_sub(frag, pos + size - (offset + len));
 				goto skip_fraglist;
 			}
 
@@ -2800,7 +2818,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		} while (--i);
 
 		frag->page_offset += offset;
-		frag->size -= offset;
+		skb_frag_size_sub(frag, offset);
 
 		skb->truesize -= skb->data_len;
 		skb->len -= skb->data_len;
@@ -2852,7 +2870,7 @@ merge:
 		unsigned int eat = offset - headlen;
 
 		skbinfo->frags[0].page_offset += eat;
-		skbinfo->frags[0].size -= eat;
+		skb_frag_size_sub(&skbinfo->frags[0], eat);
 		skb->data_len -= eat;
 		skb->len -= eat;
 		offset = headlen;
@@ -2923,13 +2941,13 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_shinfo(skb)->frags[i].size;
+		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
-			sg_set_page(&sg[elt], frag->page, copy,
+			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
 				    frag->page_offset+offset-start);
 			elt++;
 			if (!(len -= copy))