Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c | 330
1 file changed, 168 insertions(+), 162 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e505b5392e1e..9e0597d189b0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/in.h>
@@ -65,7 +66,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
-#include <trace/skb.h>
+#include <trace/events/skb.h>
 
 #include "kmap_skb.h"
 
@@ -201,6 +202,12 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         skb->data = data;
         skb_reset_tail_pointer(skb);
         skb->end = skb->tail + size;
+        kmemcheck_annotate_bitfield(skb, flags1);
+        kmemcheck_annotate_bitfield(skb, flags2);
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+        skb->mac_header = ~0U;
+#endif
+
         /* make sure we initialize shinfo sequentially */
         shinfo = skb_shinfo(skb);
         atomic_set(&shinfo->dataref, 1);
@@ -210,13 +217,15 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         shinfo->gso_type = 0;
         shinfo->ip6_frag_id = 0;
         shinfo->tx_flags.flags = 0;
-        shinfo->frag_list = NULL;
+        skb_frag_list_init(skb);
         memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
         if (fclone) {
                 struct sk_buff *child = skb + 1;
                 atomic_t *fclone_ref = (atomic_t *) (child + 1);
 
+                kmemcheck_annotate_bitfield(child, flags1);
+                kmemcheck_annotate_bitfield(child, flags2);
                 skb->fclone = SKB_FCLONE_ORIG;
                 atomic_set(fclone_ref, 1);
 
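Note: kmemcheck_annotate_bitfield() tells kmemcheck that the bytes backing a bracketed bitfield block are fully initialized, since individual bitfield writes otherwise leave sibling bits looking uninitialized. A minimal sketch of the API as the annotations above assume it, on a hypothetical struct:

#include <linux/kmemcheck.h>
#include <linux/slab.h>

/* Hypothetical struct, for illustration only: the begin/end markers
 * bracket the bitfield bytes so the annotation below covers the range. */
struct example {
        kmemcheck_bitfield_begin(flags);
        unsigned int a:1;
        unsigned int b:1;
        kmemcheck_bitfield_end(flags);
};

static struct example *example_alloc(gfp_t gfp)
{
        struct example *e = kmalloc(sizeof(*e), gfp);

        if (e)  /* same pattern as the flags1/flags2 annotations above */
                kmemcheck_annotate_bitfield(e, flags);
        return e;
}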
@@ -323,7 +332,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 {
         struct sk_buff *list;
 
-        for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
+        skb_walk_frags(skb, list)
                 skb_get(list);
 }
 
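Note: skb_walk_frags(), together with the skb_has_frags(), skb_frag_list_init() and SKB_FRAG_ASSERT() helpers used elsewhere in this diff, hides direct skb_shinfo(skb)->frag_list access behind one abstraction. The helpers live in <linux/skbuff.h>, presumably along these lines (illustrative reconstruction, not part of this diff):

static inline void skb_frag_list_init(struct sk_buff *skb)
{
        skb_shinfo(skb)->frag_list = NULL;
}

static inline bool skb_has_frags(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->frag_list != NULL;
}

/* assert that an skb carries no frag list before one is attached */
#define SKB_FRAG_ASSERT(skb)    BUG_ON(skb_has_frags(skb))

#define skb_walk_frags(skb, iter) \
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)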
@@ -338,7 +347,7 @@ static void skb_release_data(struct sk_buff *skb)
                                 put_page(skb_shinfo(skb)->frags[i].page);
                 }
 
-                if (skb_shinfo(skb)->frag_list)
+                if (skb_has_frags(skb))
                         skb_drop_fraglist(skb);
 
                 kfree(skb->head);
@@ -381,7 +390,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 
 static void skb_release_head_state(struct sk_buff *skb)
 {
-        dst_release(skb->dst);
+        skb_dst_drop(skb);
 #ifdef CONFIG_XFRM
         secpath_put(skb->sp);
 #endif
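Note: skb_dst_drop() and the skb_dst()/skb_dst_set() pair used in __copy_skb_header further down replace direct skb->dst dereferences with an accessor layer. A hedged sketch of what they presumably look like (the _skb_dst member name is recalled from this kernel era and may differ):

/* Illustrative reconstruction, not part of this diff. */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
        return (struct dst_entry *)skb->_skb_dst;
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
        skb->_skb_dst = (unsigned long)dst;
}

static inline void skb_dst_drop(struct sk_buff *skb)
{
        if (skb->_skb_dst)
                dst_release(skb_dst(skb));
        skb->_skb_dst = 0UL;
}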
@@ -503,7 +512,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
         shinfo->gso_type = 0;
         shinfo->ip6_frag_id = 0;
         shinfo->tx_flags.flags = 0;
-        shinfo->frag_list = NULL;
+        skb_frag_list_init(skb);
         memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
         memset(skb, 0, offsetof(struct sk_buff, tail));
@@ -521,13 +530,12 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
         new->transport_header = old->transport_header;
         new->network_header = old->network_header;
         new->mac_header = old->mac_header;
-        new->dst = dst_clone(old->dst);
+        skb_dst_set(new, dst_clone(skb_dst(old)));
 #ifdef CONFIG_XFRM
         new->sp = secpath_get(old->sp);
 #endif
         memcpy(new->cb, old->cb, sizeof(old->cb));
-        new->csum_start = old->csum_start;
-        new->csum_offset = old->csum_offset;
+        new->csum = old->csum;
         new->local_df = old->local_df;
         new->pkt_type = old->pkt_type;
         new->ip_summed = old->ip_summed;
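Note: collapsing the csum_start/csum_offset copies into a single 'new->csum = old->csum' works because the three fields share storage in struct sk_buff; roughly (excerpt-style sketch from memory, not part of this diff):

#include <linux/types.h>

/* How the checksum fields are expected to overlay inside struct sk_buff:
 * one assignment of csum copies whichever representation is live
 * (CHECKSUM_COMPLETE value vs CHECKSUM_PARTIAL start/offset pair). */
struct csum_overlay_sketch {
        union {
                __wsum  csum;
                struct {
                        __u16   csum_start;     /* where checksumming starts */
                        __u16   csum_offset;    /* where to store the result */
                };
        };
};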
@@ -538,6 +546,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
         new->protocol = old->protocol;
         new->mark = old->mark;
+        new->iif = old->iif;
         __nf_copy(new, old);
 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
@@ -550,10 +559,17 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
 #endif
         new->vlan_tci = old->vlan_tci;
+#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
+        new->do_not_encrypt = old->do_not_encrypt;
+#endif
 
         skb_copy_secmark(new, old);
 }
 
+/*
+ * You should not add any new code to this function. Add it to
+ * __copy_skb_header above instead.
+ */
 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 {
 #define C(x) n->x = skb->x
@@ -569,16 +585,11 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
         n->cloned = 1;
         n->nohdr = 0;
         n->destructor = NULL;
-        C(iif);
         C(tail);
         C(end);
         C(head);
         C(data);
         C(truesize);
-#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
-        C(do_not_encrypt);
-        C(requeue);
-#endif
         atomic_set(&n->users, 1);
 
         atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -633,6 +644,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
                 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                 if (!n)
                         return NULL;
+
+                kmemcheck_annotate_bitfield(n, flags1);
+                kmemcheck_annotate_bitfield(n, flags2);
                 n->fclone = SKB_FCLONE_UNAVAILABLE;
         }
 
@@ -655,7 +669,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
         /* {transport,network,mac}_header are relative to skb->head */
         new->transport_header += offset;
         new->network_header   += offset;
-        new->mac_header       += offset;
+        if (skb_mac_header_was_set(new))
+                new->mac_header += offset;
 #endif
         skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
         skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
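Note: skb_mac_header_was_set() pairs with the new 'skb->mac_header = ~0U' initialization in __alloc_skb() above: in offset mode an unset MAC header is encoded as ~0U, so the offset adjustment is skipped instead of corrupting the sentinel. Presumably, roughly:

/* Sketch of the NET_SKBUFF_DATA_USES_OFFSET variant, illustrative only. */
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
        return skb->mac_header != ~0U;
}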
@@ -755,7 +770,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
                 skb_shinfo(n)->nr_frags = i;
         }
 
-        if (skb_shinfo(skb)->frag_list) {
+        if (skb_has_frags(skb)) {
                 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
                 skb_clone_fraglist(n);
         }
@@ -818,7 +833,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                 get_page(skb_shinfo(skb)->frags[i].page);
 
-        if (skb_shinfo(skb)->frag_list)
+        if (skb_has_frags(skb))
                 skb_clone_fraglist(skb);
 
         skb_release_data(skb);
@@ -837,7 +852,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
         skb->tail             += off;
         skb->transport_header += off;
         skb->network_header   += off;
-        skb->mac_header       += off;
+        if (skb_mac_header_was_set(skb))
+                skb->mac_header += off;
         skb->csum_start       += nhead;
         skb->cloned   = 0;
         skb->hdr_len  = 0;
@@ -929,7 +945,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
         n->transport_header += off;
         n->network_header   += off;
-        n->mac_header       += off;
+        if (skb_mac_header_was_set(skb))
+                n->mac_header += off;
 #endif
 
         return n;
@@ -1090,7 +1107,7 @@ drop_pages:
                 for (; i < nfrags; i++)
                         put_page(skb_shinfo(skb)->frags[i].page);
 
-                if (skb_shinfo(skb)->frag_list)
+                if (skb_has_frags(skb))
                         skb_drop_fraglist(skb);
                 goto done;
         }
@@ -1185,7 +1202,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
         /* Optimization: no fragments, no reasons to preestimate
          * size of pulled pages. Superb.
          */
-        if (!skb_shinfo(skb)->frag_list)
+        if (!skb_has_frags(skb))
                 goto pull_pages;
 
         /* Estimate size of pulled pages. */
@@ -1282,8 +1299,9 @@ EXPORT_SYMBOL(__pskb_pull_tail);
 
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
-        int i, copy;
         int start = skb_headlen(skb);
+        struct sk_buff *frag_iter;
+        int i, copy;
 
         if (offset > (int)skb->len - len)
                 goto fault;
@@ -1325,28 +1343,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
                 start = end;
         }
 
-        if (skb_shinfo(skb)->frag_list) {
-                struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-                for (; list; list = list->next) {
-                        int end;
-
-                        WARN_ON(start > offset + len);
-
-                        end = start + list->len;
-                        if ((copy = end - offset) > 0) {
-                                if (copy > len)
-                                        copy = len;
-                                if (skb_copy_bits(list, offset - start,
-                                                  to, copy))
-                                        goto fault;
-                                if ((len -= copy) == 0)
-                                        return 0;
-                                offset += copy;
-                                to += copy;
-                        }
-                        start = end;
-                }
+        skb_walk_frags(skb, frag_iter) {
+                int end;
+
+                WARN_ON(start > offset + len);
+
+                end = start + frag_iter->len;
+                if ((copy = end - offset) > 0) {
+                        if (copy > len)
+                                copy = len;
+                        if (skb_copy_bits(frag_iter, offset - start, to, copy))
+                                goto fault;
+                        if ((len -= copy) == 0)
+                                return 0;
+                        offset += copy;
+                        to += copy;
+                }
+                start = end;
         }
         if (!len)
                 return 0;
@@ -1531,6 +1544,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                 .ops = &sock_pipe_buf_ops,
                 .spd_release = sock_spd_release,
         };
+        struct sk_buff *frag_iter;
         struct sock *sk = skb->sk;
 
         /*
@@ -1545,13 +1559,11 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
         /*
          * now see if we have a frag_list to map
          */
-        if (skb_shinfo(skb)->frag_list) {
-                struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-                for (; list && tlen; list = list->next) {
-                        if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
-                                break;
-                }
+        skb_walk_frags(skb, frag_iter) {
+                if (!tlen)
+                        break;
+                if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+                        break;
         }
 
 done:
@@ -1590,8 +1602,9 @@ done:
 
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
-        int i, copy;
         int start = skb_headlen(skb);
+        struct sk_buff *frag_iter;
+        int i, copy;
 
         if (offset > (int)skb->len - len)
                 goto fault;
@@ -1632,28 +1645,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
                 start = end;
         }
 
-        if (skb_shinfo(skb)->frag_list) {
-                struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-                for (; list; list = list->next) {
-                        int end;
-
-                        WARN_ON(start > offset + len);
-
-                        end = start + list->len;
-                        if ((copy = end - offset) > 0) {
-                                if (copy > len)
-                                        copy = len;
-                                if (skb_store_bits(list, offset - start,
-                                                   from, copy))
-                                        goto fault;
-                                if ((len -= copy) == 0)
-                                        return 0;
-                                offset += copy;
-                                from += copy;
-                        }
-                        start = end;
-                }
+        skb_walk_frags(skb, frag_iter) {
+                int end;
+
+                WARN_ON(start > offset + len);
+
+                end = start + frag_iter->len;
+                if ((copy = end - offset) > 0) {
+                        if (copy > len)
+                                copy = len;
+                        if (skb_store_bits(frag_iter, offset - start,
+                                           from, copy))
+                                goto fault;
+                        if ((len -= copy) == 0)
+                                return 0;
+                        offset += copy;
+                        from += copy;
+                }
+                start = end;
         }
         if (!len)
                 return 0;
@@ -1670,6 +1679,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 {
         int start = skb_headlen(skb);
         int i, copy = start - offset;
+        struct sk_buff *frag_iter;
         int pos = 0;
 
         /* Checksum header. */
@@ -1709,29 +1719,25 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
                 start = end;
         }
 
-        if (skb_shinfo(skb)->frag_list) {
-                struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-                for (; list; list = list->next) {
-                        int end;
-
-                        WARN_ON(start > offset + len);
-
-                        end = start + list->len;
-                        if ((copy = end - offset) > 0) {
-                                __wsum csum2;
-                                if (copy > len)
-                                        copy = len;
-                                csum2 = skb_checksum(list, offset - start,
-                                                     copy, 0);
-                                csum = csum_block_add(csum, csum2, pos);
-                                if ((len -= copy) == 0)
-                                        return csum;
-                                offset += copy;
-                                pos += copy;
-                        }
-                        start = end;
-                }
+        skb_walk_frags(skb, frag_iter) {
+                int end;
+
+                WARN_ON(start > offset + len);
+
+                end = start + frag_iter->len;
+                if ((copy = end - offset) > 0) {
+                        __wsum csum2;
+                        if (copy > len)
+                                copy = len;
+                        csum2 = skb_checksum(frag_iter, offset - start,
+                                             copy, 0);
+                        csum = csum_block_add(csum, csum2, pos);
+                        if ((len -= copy) == 0)
+                                return csum;
+                        offset += copy;
+                        pos += copy;
+                }
+                start = end;
         }
         BUG_ON(len);
 
@@ -1746,6 +1752,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 {
         int start = skb_headlen(skb);
         int i, copy = start - offset;
+        struct sk_buff *frag_iter;
         int pos = 0;
 
         /* Copy header. */
@@ -1790,31 +1797,27 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                 start = end;
         }
 
-        if (skb_shinfo(skb)->frag_list) {
-                struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-                for (; list; list = list->next) {
-                        __wsum csum2;
-                        int end;
-
-                        WARN_ON(start > offset + len);
-
-                        end = start + list->len;
-                        if ((copy = end - offset) > 0) {
-                                if (copy > len)
-                                        copy = len;
-                                csum2 = skb_copy_and_csum_bits(list,
-                                                               offset - start,
-                                                               to, copy, 0);
-                                csum = csum_block_add(csum, csum2, pos);
-                                if ((len -= copy) == 0)
-                                        return csum;
-                                offset += copy;
-                                to += copy;
-                                pos += copy;
-                        }
-                        start = end;
-                }
+        skb_walk_frags(skb, frag_iter) {
+                __wsum csum2;
+                int end;
+
+                WARN_ON(start > offset + len);
+
+                end = start + frag_iter->len;
+                if ((copy = end - offset) > 0) {
+                        if (copy > len)
+                                copy = len;
+                        csum2 = skb_copy_and_csum_bits(frag_iter,
+                                                       offset - start,
+                                                       to, copy, 0);
+                        csum = csum_block_add(csum, csum2, pos);
+                        if ((len -= copy) == 0)
+                                return csum;
+                        offset += copy;
+                        to += copy;
+                        pos += copy;
+                }
+                start = end;
         }
         BUG_ON(len);
         return csum;
@@ -2324,8 +2327,7 @@ next_skb:
                 st->frag_data = NULL;
         }
 
-        if (st->root_skb == st->cur_skb &&
-            skb_shinfo(st->root_skb)->frag_list) {
+        if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
                 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
                 st->frag_idx = 0;
                 goto next_skb;
@@ -2636,7 +2638,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                         } else
                                 skb_get(fskb2);
 
-                        BUG_ON(skb_shinfo(nskb)->frag_list);
+                        SKB_FRAG_ASSERT(nskb);
                         skb_shinfo(nskb)->frag_list = fskb2;
                 }
 
@@ -2661,30 +2663,40 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
         struct sk_buff *p = *head;
         struct sk_buff *nskb;
+        struct skb_shared_info *skbinfo = skb_shinfo(skb);
+        struct skb_shared_info *pinfo = skb_shinfo(p);
         unsigned int headroom;
         unsigned int len = skb_gro_len(skb);
+        unsigned int offset = skb_gro_offset(skb);
+        unsigned int headlen = skb_headlen(skb);
 
         if (p->len + len >= 65536)
                 return -E2BIG;
 
-        if (skb_shinfo(p)->frag_list)
+        if (pinfo->frag_list)
                 goto merge;
-        else if (skb_headlen(skb) <= skb_gro_offset(skb)) {
-                if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags >
-                    MAX_SKB_FRAGS)
+        else if (headlen <= offset) {
+                skb_frag_t *frag;
+                skb_frag_t *frag2;
+                int i = skbinfo->nr_frags;
+                int nr_frags = pinfo->nr_frags + i;
+
+                offset -= headlen;
+
+                if (nr_frags > MAX_SKB_FRAGS)
                         return -E2BIG;
 
-                skb_shinfo(skb)->frags[0].page_offset +=
-                        skb_gro_offset(skb) - skb_headlen(skb);
-                skb_shinfo(skb)->frags[0].size -=
-                        skb_gro_offset(skb) - skb_headlen(skb);
-
-                memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
-                       skb_shinfo(skb)->frags,
-                       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
-
-                skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
-                skb_shinfo(skb)->nr_frags = 0;
+                pinfo->nr_frags = nr_frags;
+                skbinfo->nr_frags = 0;
+
+                frag = pinfo->frags + nr_frags;
+                frag2 = skbinfo->frags + i;
+                do {
+                        *--frag = *--frag2;
+                } while (--i);
+
+                frag->page_offset += offset;
+                frag->size -= offset;
 
                 skb->truesize -= skb->data_len;
                 skb->len -= skb->data_len;
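Note: the do/while above copies skb's fragment descriptors into p's array back to front, so that when it terminates 'frag' points at the first copied descriptor, which is then trimmed by 'offset' (the payload already consumed as headers). A standalone userspace demo of the same idiom, with made-up values:

#include <stdio.h>

int main(void)
{
        int pfrags[8] = { 100, 200 };   /* p already holds 2 frags */
        int sfrags[3] = { 10, 20, 30 }; /* skb contributes 3 more */
        int i = 3;
        int nr_frags = 2 + i;
        int *frag = pfrags + nr_frags;  /* one past the last new slot */
        int *frag2 = sfrags + i;

        do {
                *--frag = *--frag2;     /* copy back to front */
        } while (--i);

        /* frag now points at the first copied element, pfrags[2] */
        printf("first copied frag: %d\n", *frag);       /* prints 10 */
        return 0;
}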
@@ -2715,7 +2727,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
         *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
         skb_shinfo(nskb)->frag_list = p;
-        skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
+        skb_shinfo(nskb)->gso_size = pinfo->gso_size;
         skb_header_release(p);
         nskb->prev = p;
 
@@ -2730,16 +2742,13 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
         p = nskb;
 
 merge:
-        if (skb_gro_offset(skb) > skb_headlen(skb)) {
-                skb_shinfo(skb)->frags[0].page_offset +=
-                        skb_gro_offset(skb) - skb_headlen(skb);
-                skb_shinfo(skb)->frags[0].size -=
-                        skb_gro_offset(skb) - skb_headlen(skb);
-                skb_gro_reset_offset(skb);
-                skb_gro_pull(skb, skb_headlen(skb));
+        if (offset > headlen) {
+                skbinfo->frags[0].page_offset += offset - headlen;
+                skbinfo->frags[0].size -= offset - headlen;
+                offset = headlen;
         }
 
-        __skb_pull(skb, skb_gro_offset(skb));
+        __skb_pull(skb, offset);
 
         p->prev->next = skb;
         p->prev = skb;
@@ -2786,6 +2795,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
         int start = skb_headlen(skb);
         int i, copy = start - offset;
+        struct sk_buff *frag_iter;
         int elt = 0;
 
         if (copy > 0) {
@@ -2819,26 +2829,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
                 start = end;
         }
 
-        if (skb_shinfo(skb)->frag_list) {
-                struct sk_buff *list = skb_shinfo(skb)->frag_list;
-
-                for (; list; list = list->next) {
-                        int end;
-
-                        WARN_ON(start > offset + len);
-
-                        end = start + list->len;
-                        if ((copy = end - offset) > 0) {
-                                if (copy > len)
-                                        copy = len;
-                                elt += __skb_to_sgvec(list, sg+elt, offset - start,
-                                                      copy);
-                                if ((len -= copy) == 0)
-                                        return elt;
-                                offset += copy;
-                        }
-                        start = end;
-                }
+        skb_walk_frags(skb, frag_iter) {
+                int end;
+
+                WARN_ON(start > offset + len);
+
+                end = start + frag_iter->len;
+                if ((copy = end - offset) > 0) {
+                        if (copy > len)
+                                copy = len;
+                        elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+                                              copy);
+                        if ((len -= copy) == 0)
+                                return elt;
+                        offset += copy;
+                }
+                start = end;
         }
         BUG_ON(len);
         return elt;
@@ -2886,7 +2892,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
                 return -ENOMEM;
 
         /* Easy case. Most of packets will go this way. */
-        if (!skb_shinfo(skb)->frag_list) {
+        if (!skb_has_frags(skb)) {
                 /* A little of trouble, not enough of space for trailer.
                  * This should not happen, when stack is tuned to generate
                  * good frames. OK, on miss we reallocate and reserve even more
@@ -2921,7 +2927,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 
                 if (skb1->next == NULL && tailbits) {
                         if (skb_shinfo(skb1)->nr_frags ||
-                            skb_shinfo(skb1)->frag_list ||
+                            skb_has_frags(skb1) ||
                             skb_tailroom(skb1) < tailbits)
                                 ntail = tailbits + 128;
                 }
@@ -2930,7 +2936,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
                     skb_cloned(skb1) ||
                     ntail ||
                     skb_shinfo(skb1)->nr_frags ||
-                    skb_shinfo(skb1)->frag_list) {
+                    skb_has_frags(skb1)) {
                         struct sk_buff *skb2;
 
                         /* Fuck, we are miserable poor guys... */
@@ -3016,12 +3022,12 @@ EXPORT_SYMBOL_GPL(skb_tstamp_tx);
  */
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
 {
-        if (unlikely(start > skb->len - 2) ||
-            unlikely((int)start + off > skb->len - 2)) {
+        if (unlikely(start > skb_headlen(skb)) ||
+            unlikely((int)start + off > skb_headlen(skb) - 2)) {
                 if (net_ratelimit())
                         printk(KERN_WARNING
                                "bad partial csum: csum=%u/%u len=%u\n",
-                               start, off, skb->len);
+                               start, off, skb_headlen(skb));
                 return false;
         }
         skb->ip_summed = CHECKSUM_PARTIAL;
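Note: the tightened bounds require both the checksum start and the 2-byte result slot to sit inside the linear data (skb_headlen()) rather than anywhere within skb->len, since checksum completion writes directly into the header area. A hedged caller sketch (function name and offsets are made up, modeled loosely on virtio/tun-style receive paths):

static int rx_setup_partial_csum(struct sk_buff *skb,
                                 u16 csum_start, u16 csum_offset)
{
        /* Fails unless csum_start <= skb_headlen(skb) and
         * csum_start + csum_offset + 2 <= skb_headlen(skb). */
        if (!skb_partial_csum_set(skb, csum_start, csum_offset))
                return -EINVAL;

        /* skb->ip_summed is CHECKSUM_PARTIAL from here on. */
        return 0;
}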