author		David S. Miller <davem@davemloft.net>	2009-06-09 03:18:59 -0400
committer	David S. Miller <davem@davemloft.net>	2009-06-09 03:18:59 -0400
commit		fbb398a832086c370bce47789e155bf5a08774e9 (patch)
tree		0c1e409adf4bee8c4e70ddc621c08ff37b8ac89c /net/core/skbuff.c
parent		4cf704fbea96075942bd033fd75aa4e76ae1c8a1 (diff)
net/core/skbuff.c: Use frag list abstraction interfaces.
Signed-off-by: David S. Miller <davem@davemloft.net>
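
For reference, the abstraction interfaces this patch switches to are thin wrappers around skb_shinfo(skb)->frag_list. They are defined in include/linux/skbuff.h by an earlier patch in this series, not in this diff; the sketch below shows their likely shape and should be read as an approximation, not the authoritative definitions.

/* Sketch (assumed, for orientation only): the frag-list helpers used below. */

/* Reset the frag_list pointer of a freshly initialised skb. */
static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

/* Non-zero if the skb carries a frag_list of chained skbs. */
static inline int skb_has_frags(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

/* Walk every skb chained on the frag_list. */
#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

/* Assert that no frag_list is attached. */
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frags(skb))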
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	230
1 file changed, 106 insertions(+), 124 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a2473b1600e3..49961ba3c0f6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,7 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	shinfo->gso_type = 0;
 	shinfo->ip6_frag_id = 0;
 	shinfo->tx_flags.flags = 0;
-	shinfo->frag_list = NULL;
+	skb_frag_list_init(skb);
 	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
 	if (fclone) {
@@ -323,7 +323,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 {
 	struct sk_buff *list;
 
-	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
+	skb_walk_frags(skb, list)
 		skb_get(list);
 }
 
@@ -338,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb)
 			put_page(skb_shinfo(skb)->frags[i].page);
 	}
 
-	if (skb_shinfo(skb)->frag_list)
+	if (skb_has_frags(skb))
 		skb_drop_fraglist(skb);
 
 	kfree(skb->head);
@@ -503,7 +503,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	shinfo->gso_type = 0;
 	shinfo->ip6_frag_id = 0;
 	shinfo->tx_flags.flags = 0;
-	shinfo->frag_list = NULL;
+	skb_frag_list_init(skb);
 	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
@@ -758,7 +758,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 		skb_shinfo(n)->nr_frags = i;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
+	if (skb_has_frags(skb)) {
 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
 		skb_clone_fraglist(n);
 	}
@@ -821,7 +821,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
 
-	if (skb_shinfo(skb)->frag_list)
+	if (skb_has_frags(skb))
 		skb_clone_fraglist(skb);
 
 	skb_release_data(skb);
@@ -1093,7 +1093,7 @@ drop_pages:
 		for (; i < nfrags; i++)
 			put_page(skb_shinfo(skb)->frags[i].page);
 
-		if (skb_shinfo(skb)->frag_list)
+		if (skb_has_frags(skb))
 			skb_drop_fraglist(skb);
 		goto done;
 	}
@@ -1188,7 +1188,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	/* Optimization: no fragments, no reasons to preestimate
 	 * size of pulled pages. Superb.
 	 */
-	if (!skb_shinfo(skb)->frag_list)
+	if (!skb_has_frags(skb))
 		goto pull_pages;
 
 	/* Estimate size of pulled pages. */
@@ -1285,8 +1285,9 @@ EXPORT_SYMBOL(__pskb_pull_tail);
 
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
-	int i, copy;
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
+	int i, copy;
 
 	if (offset > (int)skb->len - len)
 		goto fault;
@@ -1328,28 +1329,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_copy_bits(list, offset - start,
-						  to, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				to += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_copy_bits(frag_iter, offset - start, to, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			to += copy;
+		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -1534,6 +1530,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		.ops = &sock_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
+	struct sk_buff *frag_iter;
 	struct sock *sk = skb->sk;
 
 	/*
@@ -1548,13 +1545,11 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 	/*
 	 * now see if we have a frag_list to map
 	 */
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list && tlen; list = list->next) {
-			if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
-				break;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		if (!tlen)
+			break;
+		if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+			break;
 	}
 
 done:
@@ -1593,8 +1588,9 @@ done:
 
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
-	int i, copy;
 	int start = skb_headlen(skb);
+	struct sk_buff *frag_iter;
+	int i, copy;
 
 	if (offset > (int)skb->len - len)
 		goto fault;
@@ -1635,28 +1631,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				if (skb_store_bits(list, offset - start,
-						   from, copy))
-					goto fault;
-				if ((len -= copy) == 0)
-					return 0;
-				offset += copy;
-				from += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			if (skb_store_bits(frag_iter, offset - start,
+					   from, copy))
+				goto fault;
+			if ((len -= copy) == 0)
+				return 0;
+			offset += copy;
+			from += copy;
+		}
+		start = end;
 	}
 	if (!len)
 		return 0;
@@ -1673,6 +1665,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int pos = 0;
 
 	/* Checksum header. */
@@ -1712,29 +1705,25 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				__wsum csum2;
-				if (copy > len)
-					copy = len;
-				csum2 = skb_checksum(list, offset - start,
-						     copy, 0);
-				csum = csum_block_add(csum, csum2, pos);
-				if ((len -= copy) == 0)
-					return csum;
-				offset += copy;
-				pos += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			__wsum csum2;
+			if (copy > len)
+				copy = len;
+			csum2 = skb_checksum(frag_iter, offset - start,
+					     copy, 0);
+			csum = csum_block_add(csum, csum2, pos);
+			if ((len -= copy) == 0)
+				return csum;
+			offset += copy;
+			pos += copy;
+		}
+		start = end;
 	}
 	BUG_ON(len);
 
@@ -1749,6 +1738,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int pos = 0;
 
 	/* Copy header. */
@@ -1793,31 +1783,27 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			__wsum csum2;
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				csum2 = skb_copy_and_csum_bits(list,
-							       offset - start,
-							       to, copy, 0);
-				csum = csum_block_add(csum, csum2, pos);
-				if ((len -= copy) == 0)
-					return csum;
-				offset += copy;
-				to += copy;
-				pos += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		__wsum csum2;
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			csum2 = skb_copy_and_csum_bits(frag_iter,
+						       offset - start,
+						       to, copy, 0);
+			csum = csum_block_add(csum, csum2, pos);
+			if ((len -= copy) == 0)
+				return csum;
+			offset += copy;
+			to += copy;
+			pos += copy;
+		}
+		start = end;
 	}
 	BUG_ON(len);
 	return csum;
@@ -2327,8 +2313,7 @@ next_skb:
 		st->frag_data = NULL;
 	}
 
-	if (st->root_skb == st->cur_skb &&
-	    skb_shinfo(st->root_skb)->frag_list) {
+	if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
 		st->frag_idx = 0;
 		goto next_skb;
@@ -2639,7 +2624,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 		} else
 			skb_get(fskb2);
 
-		BUG_ON(skb_shinfo(nskb)->frag_list);
+		SKB_FRAG_ASSERT(nskb);
 		skb_shinfo(nskb)->frag_list = fskb2;
 	}
 
@@ -2796,6 +2781,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
 	int start = skb_headlen(skb);
 	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
 	int elt = 0;
 
 	if (copy > 0) {
@@ -2829,26 +2815,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 		start = end;
 	}
 
-	if (skb_shinfo(skb)->frag_list) {
-		struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-		for (; list; list = list->next) {
-			int end;
-
-			WARN_ON(start > offset + len);
-
-			end = start + list->len;
-			if ((copy = end - offset) > 0) {
-				if (copy > len)
-					copy = len;
-				elt += __skb_to_sgvec(list, sg+elt, offset - start,
-						      copy);
-				if ((len -= copy) == 0)
-					return elt;
-				offset += copy;
-			}
-			start = end;
-		}
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		if ((copy = end - offset) > 0) {
+			if (copy > len)
+				copy = len;
+			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+					      copy);
+			if ((len -= copy) == 0)
+				return elt;
+			offset += copy;
+		}
+		start = end;
 	}
 	BUG_ON(len);
 	return elt;
@@ -2896,7 +2878,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		return -ENOMEM;
 
 	/* Easy case. Most of packets will go this way. */
-	if (!skb_shinfo(skb)->frag_list) {
+	if (!skb_has_frags(skb)) {
 		/* A little of trouble, not enough of space for trailer.
 		 * This should not happen, when stack is tuned to generate
 		 * good frames. OK, on miss we reallocate and reserve even more
@@ -2931,7 +2913,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 
 		if (skb1->next == NULL && tailbits) {
 			if (skb_shinfo(skb1)->nr_frags ||
-			    skb_shinfo(skb1)->frag_list ||
+			    skb_has_frags(skb1) ||
 			    skb_tailroom(skb1) < tailbits)
 				ntail = tailbits + 128;
 		}
@@ -2940,7 +2922,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		    skb_cloned(skb1) ||
 		    ntail ||
 		    skb_shinfo(skb1)->nr_frags ||
-		    skb_shinfo(skb1)->frag_list) {
+		    skb_has_frags(skb1)) {
 			struct sk_buff *skb2;
 
 			/* Fuck, we are miserable poor guys... */