Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	241
1 file changed, 111 insertions(+), 130 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e815e685f28..b94d777e3eb4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,7 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	shinfo->gso_type = 0;
 	shinfo->ip6_frag_id = 0;
 	shinfo->tx_flags.flags = 0;
-	shinfo->frag_list = NULL;
+	skb_frag_list_init(skb);
 	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
 	if (fclone) {
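Here and in skb_recycle_check() further down, the open-coded clearing of shinfo->frag_list becomes skb_frag_list_init(). The helper itself is not shown in this diff; a minimal sketch of what it presumably looks like in include/linux/skbuff.h (an assumption, not taken from this patch):

	/* Sketch only: reset the frag_list pointer in the skb's shared info area. */
	static inline void skb_frag_list_init(struct sk_buff *skb)
	{
		skb_shinfo(skb)->frag_list = NULL;
	}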
@@ -323,7 +323,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 {
 	struct sk_buff *list;
 
-	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
+	skb_walk_frags(skb, list)
 		skb_get(list);
 }
 
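skb_walk_frags() replaces the open-coded walk over shinfo->frag_list used on the old side of this and the later hunks. It is presumably a simple iterator macro along these lines (sketch, not part of this diff):

	/* Sketch: visit every skb chained on skb's frag_list, in order. */
	#define skb_walk_frags(skb, iter) \
		for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)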
@@ -338,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb)
 				put_page(skb_shinfo(skb)->frags[i].page);
 		}
 
-		if (skb_shinfo(skb)->frag_list)
+		if (skb_has_frags(skb))
 			skb_drop_fraglist(skb);
 
 		kfree(skb->head);
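Likewise, skb_has_frags() stands in for the raw NULL test on shinfo->frag_list. A likely definition (again a sketch; the real one lives in include/linux/skbuff.h and its exact return type may differ):

	/* Sketch: non-zero when the skb carries a frag_list of chained skbs. */
	static inline int skb_has_frags(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->frag_list != NULL;
	}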
@@ -381,7 +381,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 
 static void skb_release_head_state(struct sk_buff *skb)
 {
-	dst_release(skb->dst);
+	skb_dst_drop(skb);
 #ifdef CONFIG_XFRM
 	secpath_put(skb->sp);
 #endif
@@ -503,7 +503,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	shinfo->gso_type = 0;
 	shinfo->ip6_frag_id = 0;
 	shinfo->tx_flags.flags = 0;
-	shinfo->frag_list = NULL;
+	skb_frag_list_init(skb);
 	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
@@ -521,7 +521,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->transport_header = old->transport_header;
 	new->network_header = old->network_header;
 	new->mac_header = old->mac_header;
-	new->dst = dst_clone(old->dst);
+	skb_dst_set(new, dst_clone(skb_dst(old)));
 #ifdef CONFIG_XFRM
 	new->sp = secpath_get(old->sp);
 #endif
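The dst conversions above (skb_dst_drop() in skb_release_head_state() and skb_dst()/skb_dst_set() here) wrap what used to be direct skb->dst accesses. The accessors are defined elsewhere; ignoring how the pointer is actually stored inside struct sk_buff, they behave roughly like this sketch (assumption, not part of this patch):

	/* Sketch of the dst accessors, modelled on the plain pointer they replace. */
	static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
	{
		return skb->dst;
	}

	static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
	{
		skb->dst = dst;
	}

	static inline void skb_dst_drop(struct sk_buff *skb)
	{
		dst_release(skb_dst(skb));	/* dst_release() accepts NULL */
		skb_dst_set(skb, NULL);
	}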
@@ -552,7 +552,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->vlan_tci = old->vlan_tci;
 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
 	new->do_not_encrypt = old->do_not_encrypt;
-	new->requeue = old->requeue;
 #endif
 
 	skb_copy_secmark(new, old);
@@ -758,7 +757,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 		skb_shinfo(n)->nr_frags = i;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
+	if (skb_has_frags(skb)) {
 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
 		skb_clone_fraglist(n);
 	}
@@ -821,7 +820,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
 
-	if (skb_shinfo(skb)->frag_list)
+	if (skb_has_frags(skb))
 		skb_clone_fraglist(skb);
 
 	skb_release_data(skb);
@@ -1093,7 +1092,7 @@ drop_pages:
 		for (; i < nfrags; i++)
 			put_page(skb_shinfo(skb)->frags[i].page);
 
-		if (skb_shinfo(skb)->frag_list)
+		if (skb_has_frags(skb))
 			skb_drop_fraglist(skb);
 		goto done;
 	}
@@ -1188,7 +1187,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	/* Optimization: no fragments, no reasons to preestimate
 	 * size of pulled pages. Superb.
 	 */
-	if (!skb_shinfo(skb)->frag_list)
+	if (!skb_has_frags(skb))
 		goto pull_pages;
 
 	/* Estimate size of pulled pages. */
@@ -1285,8 +1284,9 @@ EXPORT_SYMBOL(__pskb_pull_tail);
 
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
-	int i, copy;
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
+	int i, copy;
 
 	if (offset > (int)skb->len - len)
 		goto fault;
@@ -1328,28 +1328,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_bits(list, offset - start,
-						  to, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				to += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_bits(frag_iter, offset - start, to, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			to += copy;
+		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -1534,6 +1529,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		.ops = &sock_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
+	struct sk_buff *frag_iter;
 	struct sock *sk = skb->sk;
 
 	/*
@@ -1548,13 +1544,11 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 	/*
 	 * now see if we have a frag_list to map
 	 */
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list && tlen; list = list->next) {
-			if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
-				break;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		if (!tlen)
+			break;
+		if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+			break;
 	}
 
 done:
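The only behavioural subtlety in the skb_splice_bits() hunk is the loop condition: the old for-loop stopped when either list or tlen reached zero, and the new body preserves that by breaking out as soon as tlen hits zero. Expanded through the skb_walk_frags() macro sketched earlier (an assumption about its definition), the new code reads:

	/* Equivalent expansion of the converted fragment walk (sketch). */
	for (frag_iter = skb_shinfo(skb)->frag_list; frag_iter;
	     frag_iter = frag_iter->next) {
		if (!tlen)	/* preserves the old "list && tlen" termination */
			break;
		if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
			break;
	}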
@@ -1593,8 +1587,9 @@ done:
 
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
-	int i, copy;
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
+	int i, copy;
 
 	if (offset > (int)skb->len - len)
 		goto fault;
@@ -1635,28 +1630,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_store_bits(list, offset - start,
-						   from, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				from += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_store_bits(frag_iter, offset - start,
+					   from, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			from += copy;
+		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -1673,6 +1664,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int pos = 0;
 
 	/* Checksum header. */
@@ -1712,29 +1704,25 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				__wsum csum2;
-				if (copy > len)
-					copy = len;
-				csum2 = skb_checksum(list, offset - start,
-						     copy, 0);
-				csum = csum_block_add(csum, csum2, pos);
-				if ((len -= copy) == 0)
-					return csum;
-				offset += copy;
-				pos += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			__wsum csum2;
+			if (copy > len)
+				copy = len;
+			csum2 = skb_checksum(frag_iter, offset - start,
+					     copy, 0);
+			csum = csum_block_add(csum, csum2, pos);
+			if ((len -= copy) == 0)
+				return csum;
+			offset += copy;
+			pos += copy;
+		}
+		start = end;
 	}
 	BUG_ON(len);
 
@@ -1749,6 +1737,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int pos = 0;
 
 	/* Copy header. */
@@ -1793,31 +1782,27 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			__wsum csum2;
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				csum2 = skb_copy_and_csum_bits(list,
-							       offset - start,
-							       to, copy, 0);
-				csum = csum_block_add(csum, csum2, pos);
-				if ((len -= copy) == 0)
-					return csum;
-				offset += copy;
-				to += copy;
-				pos += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		__wsum csum2;
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			csum2 = skb_copy_and_csum_bits(frag_iter,
+						       offset - start,
+						       to, copy, 0);
+			csum = csum_block_add(csum, csum2, pos);
+			if ((len -= copy) == 0)
+				return csum;
+			offset += copy;
+			to += copy;
+			pos += copy;
+		}
+		start = end;
 	}
 	BUG_ON(len);
 	return csum;
@@ -2327,8 +2312,7 @@ next_skb:
 		st->frag_data = NULL;
 	}
 
-	if (st->root_skb == st->cur_skb &&
-	    skb_shinfo(st->root_skb)->frag_list) {
+	if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
 		st->frag_idx = 0;
 		goto next_skb;
@@ -2639,7 +2623,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		} else
 			skb_get(fskb2);
 
-		BUG_ON(skb_shinfo(nskb)->frag_list);
+		SKB_FRAG_ASSERT(nskb);
 		skb_shinfo(nskb)->frag_list = fskb2;
 	}
 
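SKB_FRAG_ASSERT() replaces the open-coded BUG_ON() on shinfo->frag_list in skb_segment(); presumably it is just a named wrapper for the same check, roughly (sketch, not part of this diff):

	/* Sketch: trip a BUG if the skb unexpectedly already owns a frag_list. */
	#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frags(skb))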
@@ -2796,6 +2780,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int elt = 0;
 
 	if (copy > 0) {
@@ -2829,26 +2814,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				elt += __skb_to_sgvec(list, sg+elt, offset - start,
-						      copy);
-				if ((len -= copy) == 0)
-					return elt;
-				offset += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+					      copy);
+			if ((len -= copy) == 0)
+				return elt;
+			offset += copy;
+		}
+		start = end;
 	}
 	BUG_ON(len);
 	return elt;
@@ -2896,7 +2877,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		return -ENOMEM;
 
 	/* Easy case. Most of packets will go this way. */
-	if (!skb_shinfo(skb)->frag_list) {
+	if (!skb_has_frags(skb)) {
 		/* A little of trouble, not enough of space for trailer.
 		 * This should not happen, when stack is tuned to generate
 		 * good frames. OK, on miss we reallocate and reserve even more
@@ -2931,7 +2912,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 
 		if (skb1->next == NULL && tailbits) {
 			if (skb_shinfo(skb1)->nr_frags ||
-			    skb_shinfo(skb1)->frag_list ||
+			    skb_has_frags(skb1) ||
 			    skb_tailroom(skb1) < tailbits)
 				ntail = tailbits + 128;
 		}
@@ -2940,7 +2921,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		    skb_cloned(skb1) ||
 		    ntail ||
 		    skb_shinfo(skb1)->nr_frags ||
-		    skb_shinfo(skb1)->frag_list) {
+		    skb_has_frags(skb1)) {
 			struct sk_buff *skb2;
 
 			/* Fuck, we are miserable poor guys... */
@@ -3026,12 +3007,12 @@ EXPORT_SYMBOL_GPL(skb_tstamp_tx);
  */
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
 {
-	if (unlikely(start > skb->len - 2) ||
-	    unlikely((int)start + off > skb->len - 2)) {
+	if (unlikely(start > skb_headlen(skb)) ||
+	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
 		if (net_ratelimit())
 			printk(KERN_WARNING
 			       "bad partial csum: csum=%u/%u len=%u\n",
-			       start, off, skb->len);
+			       start, off, skb_headlen(skb));
 		return false;
 	}
 	skb->ip_summed = CHECKSUM_PARTIAL;
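The tightened sanity check bounds the checksum start and the store offset by the linear data area (skb_headlen()) rather than the full skb->len, since csum_start/csum_offset must point into data that is actually present in the linear buffer. A hypothetical caller (the names below are illustrative, not taken from this patch) would use the helper like this:

	/* Hypothetical driver-side use of skb_partial_csum_set(); csum_start and
	 * csum_off come from a device-supplied checksum hint. On success the skb
	 * is marked CHECKSUM_PARTIAL: checksumming begins csum_start bytes into
	 * the packet data and the result is stored csum_off bytes after that. */
	if (!skb_partial_csum_set(skb, csum_start, csum_off))
		goto frame_err;	/* hint points outside the linear header area */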