| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-15 12:40:05 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-15 12:40:05 -0400 |
| commit | 2ed0e21b30b53d3a94e204196e523e6c8f732b56 (patch) | |
| tree | de2635426477d86338a9469ce09ba0626052288f /net/core/skbuff.c | |
| parent | 0fa213310cd8fa7a51071cdcf130e26fa56e9549 (diff) | |
| parent | 9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1244 commits)
pkt_sched: Rename PSCHED_US2NS and PSCHED_NS2US
ipv4: Fix fib_trie rebalancing
Bluetooth: Fix issue with uninitialized nsh.type in DTL-1 driver
Bluetooth: Fix Kconfig issue with RFKILL integration
PIM-SM: namespace changes
ipv4: update ARPD help text
net: use a deferred timer in rt_check_expire
ieee802154: fix kconfig bool/tristate muckup
bonding: initialization rework
bonding: use is_zero_ether_addr
bonding: network device names are case sensitive
bonding: eliminate bad refcount code
bonding: fix style issues
bonding: fix destructor
bonding: remove bonding read/write semaphore
bonding: initialize before registration
bonding: bond_create always called with default parameters
x_tables: Convert printk to pr_err
netfilter: conntrack: optional reliable conntrack event delivery
list_nulls: add hlist_nulls_add_head and hlist_nulls_del
...
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r-- | net/core/skbuff.c | 307 |
1 files changed, 149 insertions, 158 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c2e4fb8f3546..1a94a3037370 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,7 +210,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, | |||
210 | shinfo->gso_type = 0; | 210 | shinfo->gso_type = 0; |
211 | shinfo->ip6_frag_id = 0; | 211 | shinfo->ip6_frag_id = 0; |
212 | shinfo->tx_flags.flags = 0; | 212 | shinfo->tx_flags.flags = 0; |
213 | shinfo->frag_list = NULL; | 213 | skb_frag_list_init(skb); |
214 | memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); | 214 | memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); |
215 | 215 | ||
216 | if (fclone) { | 216 | if (fclone) { |
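Note: the helper that replaces the open-coded `shinfo->frag_list = NULL` here (and again in skb_recycle_check further down) arrives with this series. A minimal sketch of its assumed shape in include/linux/skbuff.h — not copied from the tree, just the behaviour this hunk relies on:

```c
/* Assumed shape of the new helper: reset the skb's frag_list in its
 * shared info, replacing direct shinfo->frag_list = NULL stores. */
static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}
```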
@@ -323,7 +323,7 @@ static void skb_clone_fraglist(struct sk_buff *skb) | |||
323 | { | 323 | { |
324 | struct sk_buff *list; | 324 | struct sk_buff *list; |
325 | 325 | ||
326 | for (list = skb_shinfo(skb)->frag_list; list; list = list->next) | 326 | skb_walk_frags(skb, list) |
327 | skb_get(list); | 327 | skb_get(list); |
328 | } | 328 | } |
329 | 329 | ||
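Note: skb_walk_frags() replaces the hand-rolled `for (list = skb_shinfo(skb)->frag_list; ...)` loops used throughout this file. Assuming it is the obvious iteration macro over the frag_list chain, it would look roughly like:

```c
/* Assumed definition: iterate over the sk_buffs chained on skb's frag_list. */
#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
```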
@@ -338,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb) | |||
338 | put_page(skb_shinfo(skb)->frags[i].page); | 338 | put_page(skb_shinfo(skb)->frags[i].page); |
339 | } | 339 | } |
340 | 340 | ||
341 | if (skb_shinfo(skb)->frag_list) | 341 | if (skb_has_frags(skb)) |
342 | skb_drop_fraglist(skb); | 342 | skb_drop_fraglist(skb); |
343 | 343 | ||
344 | kfree(skb->head); | 344 | kfree(skb->head); |
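Note: skb_has_frags() stands in for the bare `skb_shinfo(skb)->frag_list` truth tests. A sketch of the assumed predicate (despite the name, it refers to the frag_list chain, not the paged frags[] array):

```c
/* Assumed definition: true if the skb carries a frag_list chain. */
static inline bool skb_has_frags(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}
```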
@@ -381,7 +381,7 @@ static void kfree_skbmem(struct sk_buff *skb) | |||
381 | 381 | ||
382 | static void skb_release_head_state(struct sk_buff *skb) | 382 | static void skb_release_head_state(struct sk_buff *skb) |
383 | { | 383 | { |
384 | dst_release(skb->dst); | 384 | skb_dst_drop(skb); |
385 | #ifdef CONFIG_XFRM | 385 | #ifdef CONFIG_XFRM |
386 | secpath_put(skb->sp); | 386 | secpath_put(skb->sp); |
387 | #endif | 387 | #endif |
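Note: skb_dst_drop() replaces the direct `dst_release(skb->dst)`, so this function no longer touches the dst pointer by name. Assuming the accessor both releases and clears the entry, its effect is roughly (expressed via the accessors visible in this diff, since dst_release() tolerates NULL):

```c
/* Assumed behaviour: drop the skb's reference to its dst entry and clear it. */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
}
```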
@@ -503,7 +503,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) | |||
503 | shinfo->gso_type = 0; | 503 | shinfo->gso_type = 0; |
504 | shinfo->ip6_frag_id = 0; | 504 | shinfo->ip6_frag_id = 0; |
505 | shinfo->tx_flags.flags = 0; | 505 | shinfo->tx_flags.flags = 0; |
506 | shinfo->frag_list = NULL; | 506 | skb_frag_list_init(skb); |
507 | memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); | 507 | memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); |
508 | 508 | ||
509 | memset(skb, 0, offsetof(struct sk_buff, tail)); | 509 | memset(skb, 0, offsetof(struct sk_buff, tail)); |
@@ -521,13 +521,12 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
521 | new->transport_header = old->transport_header; | 521 | new->transport_header = old->transport_header; |
522 | new->network_header = old->network_header; | 522 | new->network_header = old->network_header; |
523 | new->mac_header = old->mac_header; | 523 | new->mac_header = old->mac_header; |
524 | new->dst = dst_clone(old->dst); | 524 | skb_dst_set(new, dst_clone(skb_dst(old))); |
525 | #ifdef CONFIG_XFRM | 525 | #ifdef CONFIG_XFRM |
526 | new->sp = secpath_get(old->sp); | 526 | new->sp = secpath_get(old->sp); |
527 | #endif | 527 | #endif |
528 | memcpy(new->cb, old->cb, sizeof(old->cb)); | 528 | memcpy(new->cb, old->cb, sizeof(old->cb)); |
529 | new->csum_start = old->csum_start; | 529 | new->csum = old->csum; |
530 | new->csum_offset = old->csum_offset; | ||
531 | new->local_df = old->local_df; | 530 | new->local_df = old->local_df; |
532 | new->pkt_type = old->pkt_type; | 531 | new->pkt_type = old->pkt_type; |
533 | new->ip_summed = old->ip_summed; | 532 | new->ip_summed = old->ip_summed; |
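Note: two things happen in this hunk. First, `skb_dst_set(new, dst_clone(skb_dst(old)))` goes through the new dst accessors rather than assigning `new->dst` directly. Second, the separate csum_start/csum_offset copies collapse into a single `new->csum = old->csum`, because those two 16-bit fields share an anonymous union with `csum` in struct sk_buff. Assuming the accessors are thin wrappers over the skb's dst field, they amount to something like the sketch below (the field name is an assumption; hiding it is the point of the conversion):

```c
/* Assumed shape of the dst accessors used above. */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return skb->_skb_dst;		/* field name assumed */
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_dst = dst;
}
```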
@@ -538,6 +537,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
538 | #endif | 537 | #endif |
539 | new->protocol = old->protocol; | 538 | new->protocol = old->protocol; |
540 | new->mark = old->mark; | 539 | new->mark = old->mark; |
540 | new->iif = old->iif; | ||
541 | __nf_copy(new, old); | 541 | __nf_copy(new, old); |
542 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ | 542 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
543 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) | 543 | defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) |
@@ -550,10 +550,17 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
550 | #endif | 550 | #endif |
551 | #endif | 551 | #endif |
552 | new->vlan_tci = old->vlan_tci; | 552 | new->vlan_tci = old->vlan_tci; |
553 | #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) | ||
554 | new->do_not_encrypt = old->do_not_encrypt; | ||
555 | #endif | ||
553 | 556 | ||
554 | skb_copy_secmark(new, old); | 557 | skb_copy_secmark(new, old); |
555 | } | 558 | } |
556 | 559 | ||
560 | /* | ||
561 | * You should not add any new code to this function. Add it to | ||
562 | * __copy_skb_header above instead. | ||
563 | */ | ||
557 | static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | 564 | static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) |
558 | { | 565 | { |
559 | #define C(x) n->x = skb->x | 566 | #define C(x) n->x = skb->x |
@@ -569,16 +576,11 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | |||
569 | n->cloned = 1; | 576 | n->cloned = 1; |
570 | n->nohdr = 0; | 577 | n->nohdr = 0; |
571 | n->destructor = NULL; | 578 | n->destructor = NULL; |
572 | C(iif); | ||
573 | C(tail); | 579 | C(tail); |
574 | C(end); | 580 | C(end); |
575 | C(head); | 581 | C(head); |
576 | C(data); | 582 | C(data); |
577 | C(truesize); | 583 | C(truesize); |
578 | #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) | ||
579 | C(do_not_encrypt); | ||
580 | C(requeue); | ||
581 | #endif | ||
582 | atomic_set(&n->users, 1); | 584 | atomic_set(&n->users, 1); |
583 | 585 | ||
584 | atomic_inc(&(skb_shinfo(skb)->dataref)); | 586 | atomic_inc(&(skb_shinfo(skb)->dataref)); |
@@ -755,7 +757,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) | |||
755 | skb_shinfo(n)->nr_frags = i; | 757 | skb_shinfo(n)->nr_frags = i; |
756 | } | 758 | } |
757 | 759 | ||
758 | if (skb_shinfo(skb)->frag_list) { | 760 | if (skb_has_frags(skb)) { |
759 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; | 761 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; |
760 | skb_clone_fraglist(n); | 762 | skb_clone_fraglist(n); |
761 | } | 763 | } |
@@ -818,7 +820,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | |||
818 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | 820 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
819 | get_page(skb_shinfo(skb)->frags[i].page); | 821 | get_page(skb_shinfo(skb)->frags[i].page); |
820 | 822 | ||
821 | if (skb_shinfo(skb)->frag_list) | 823 | if (skb_has_frags(skb)) |
822 | skb_clone_fraglist(skb); | 824 | skb_clone_fraglist(skb); |
823 | 825 | ||
824 | skb_release_data(skb); | 826 | skb_release_data(skb); |
@@ -1090,7 +1092,7 @@ drop_pages: | |||
1090 | for (; i < nfrags; i++) | 1092 | for (; i < nfrags; i++) |
1091 | put_page(skb_shinfo(skb)->frags[i].page); | 1093 | put_page(skb_shinfo(skb)->frags[i].page); |
1092 | 1094 | ||
1093 | if (skb_shinfo(skb)->frag_list) | 1095 | if (skb_has_frags(skb)) |
1094 | skb_drop_fraglist(skb); | 1096 | skb_drop_fraglist(skb); |
1095 | goto done; | 1097 | goto done; |
1096 | } | 1098 | } |
@@ -1185,7 +1187,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) | |||
1185 | /* Optimization: no fragments, no reasons to preestimate | 1187 | /* Optimization: no fragments, no reasons to preestimate |
1186 | * size of pulled pages. Superb. | 1188 | * size of pulled pages. Superb. |
1187 | */ | 1189 | */ |
1188 | if (!skb_shinfo(skb)->frag_list) | 1190 | if (!skb_has_frags(skb)) |
1189 | goto pull_pages; | 1191 | goto pull_pages; |
1190 | 1192 | ||
1191 | /* Estimate size of pulled pages. */ | 1193 | /* Estimate size of pulled pages. */ |
@@ -1282,8 +1284,9 @@ EXPORT_SYMBOL(__pskb_pull_tail); | |||
1282 | 1284 | ||
1283 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | 1285 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) |
1284 | { | 1286 | { |
1285 | int i, copy; | ||
1286 | int start = skb_headlen(skb); | 1287 | int start = skb_headlen(skb); |
1288 | struct sk_buff *frag_iter; | ||
1289 | int i, copy; | ||
1287 | 1290 | ||
1288 | if (offset > (int)skb->len - len) | 1291 | if (offset > (int)skb->len - len) |
1289 | goto fault; | 1292 | goto fault; |
@@ -1325,28 +1328,23 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | |||
1325 | start = end; | 1328 | start = end; |
1326 | } | 1329 | } |
1327 | 1330 | ||
1328 | if (skb_shinfo(skb)->frag_list) { | 1331 | skb_walk_frags(skb, frag_iter) { |
1329 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1332 | int end; |
1330 | 1333 | ||
1331 | for (; list; list = list->next) { | 1334 | WARN_ON(start > offset + len); |
1332 | int end; | 1335 | |
1333 | 1336 | end = start + frag_iter->len; | |
1334 | WARN_ON(start > offset + len); | 1337 | if ((copy = end - offset) > 0) { |
1335 | 1338 | if (copy > len) | |
1336 | end = start + list->len; | 1339 | copy = len; |
1337 | if ((copy = end - offset) > 0) { | 1340 | if (skb_copy_bits(frag_iter, offset - start, to, copy)) |
1338 | if (copy > len) | 1341 | goto fault; |
1339 | copy = len; | 1342 | if ((len -= copy) == 0) |
1340 | if (skb_copy_bits(list, offset - start, | 1343 | return 0; |
1341 | to, copy)) | 1344 | offset += copy; |
1342 | goto fault; | 1345 | to += copy; |
1343 | if ((len -= copy) == 0) | ||
1344 | return 0; | ||
1345 | offset += copy; | ||
1346 | to += copy; | ||
1347 | } | ||
1348 | start = end; | ||
1349 | } | 1346 | } |
1347 | start = end; | ||
1350 | } | 1348 | } |
1351 | if (!len) | 1349 | if (!len) |
1352 | return 0; | 1350 | return 0; |
@@ -1531,6 +1529,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1531 | .ops = &sock_pipe_buf_ops, | 1529 | .ops = &sock_pipe_buf_ops, |
1532 | .spd_release = sock_spd_release, | 1530 | .spd_release = sock_spd_release, |
1533 | }; | 1531 | }; |
1532 | struct sk_buff *frag_iter; | ||
1534 | struct sock *sk = skb->sk; | 1533 | struct sock *sk = skb->sk; |
1535 | 1534 | ||
1536 | /* | 1535 | /* |
@@ -1545,13 +1544,11 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
1545 | /* | 1544 | /* |
1546 | * now see if we have a frag_list to map | 1545 | * now see if we have a frag_list to map |
1547 | */ | 1546 | */ |
1548 | if (skb_shinfo(skb)->frag_list) { | 1547 | skb_walk_frags(skb, frag_iter) { |
1549 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1548 | if (!tlen) |
1550 | 1549 | break; | |
1551 | for (; list && tlen; list = list->next) { | 1550 | if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk)) |
1552 | if (__skb_splice_bits(list, &offset, &tlen, &spd, sk)) | 1551 | break; |
1553 | break; | ||
1554 | } | ||
1555 | } | 1552 | } |
1556 | 1553 | ||
1557 | done: | 1554 | done: |
@@ -1590,8 +1587,9 @@ done: | |||
1590 | 1587 | ||
1591 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) | 1588 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) |
1592 | { | 1589 | { |
1593 | int i, copy; | ||
1594 | int start = skb_headlen(skb); | 1590 | int start = skb_headlen(skb); |
1591 | struct sk_buff *frag_iter; | ||
1592 | int i, copy; | ||
1595 | 1593 | ||
1596 | if (offset > (int)skb->len - len) | 1594 | if (offset > (int)skb->len - len) |
1597 | goto fault; | 1595 | goto fault; |
@@ -1632,28 +1630,24 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) | |||
1632 | start = end; | 1630 | start = end; |
1633 | } | 1631 | } |
1634 | 1632 | ||
1635 | if (skb_shinfo(skb)->frag_list) { | 1633 | skb_walk_frags(skb, frag_iter) { |
1636 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1634 | int end; |
1637 | 1635 | ||
1638 | for (; list; list = list->next) { | 1636 | WARN_ON(start > offset + len); |
1639 | int end; | 1637 | |
1640 | 1638 | end = start + frag_iter->len; | |
1641 | WARN_ON(start > offset + len); | 1639 | if ((copy = end - offset) > 0) { |
1642 | 1640 | if (copy > len) | |
1643 | end = start + list->len; | 1641 | copy = len; |
1644 | if ((copy = end - offset) > 0) { | 1642 | if (skb_store_bits(frag_iter, offset - start, |
1645 | if (copy > len) | 1643 | from, copy)) |
1646 | copy = len; | 1644 | goto fault; |
1647 | if (skb_store_bits(list, offset - start, | 1645 | if ((len -= copy) == 0) |
1648 | from, copy)) | 1646 | return 0; |
1649 | goto fault; | 1647 | offset += copy; |
1650 | if ((len -= copy) == 0) | 1648 | from += copy; |
1651 | return 0; | ||
1652 | offset += copy; | ||
1653 | from += copy; | ||
1654 | } | ||
1655 | start = end; | ||
1656 | } | 1649 | } |
1650 | start = end; | ||
1657 | } | 1651 | } |
1658 | if (!len) | 1652 | if (!len) |
1659 | return 0; | 1653 | return 0; |
@@ -1670,6 +1664,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, | |||
1670 | { | 1664 | { |
1671 | int start = skb_headlen(skb); | 1665 | int start = skb_headlen(skb); |
1672 | int i, copy = start - offset; | 1666 | int i, copy = start - offset; |
1667 | struct sk_buff *frag_iter; | ||
1673 | int pos = 0; | 1668 | int pos = 0; |
1674 | 1669 | ||
1675 | /* Checksum header. */ | 1670 | /* Checksum header. */ |
@@ -1709,29 +1704,25 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, | |||
1709 | start = end; | 1704 | start = end; |
1710 | } | 1705 | } |
1711 | 1706 | ||
1712 | if (skb_shinfo(skb)->frag_list) { | 1707 | skb_walk_frags(skb, frag_iter) { |
1713 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1708 | int end; |
1714 | 1709 | ||
1715 | for (; list; list = list->next) { | 1710 | WARN_ON(start > offset + len); |
1716 | int end; | 1711 | |
1717 | 1712 | end = start + frag_iter->len; | |
1718 | WARN_ON(start > offset + len); | 1713 | if ((copy = end - offset) > 0) { |
1719 | 1714 | __wsum csum2; | |
1720 | end = start + list->len; | 1715 | if (copy > len) |
1721 | if ((copy = end - offset) > 0) { | 1716 | copy = len; |
1722 | __wsum csum2; | 1717 | csum2 = skb_checksum(frag_iter, offset - start, |
1723 | if (copy > len) | 1718 | copy, 0); |
1724 | copy = len; | 1719 | csum = csum_block_add(csum, csum2, pos); |
1725 | csum2 = skb_checksum(list, offset - start, | 1720 | if ((len -= copy) == 0) |
1726 | copy, 0); | 1721 | return csum; |
1727 | csum = csum_block_add(csum, csum2, pos); | 1722 | offset += copy; |
1728 | if ((len -= copy) == 0) | 1723 | pos += copy; |
1729 | return csum; | ||
1730 | offset += copy; | ||
1731 | pos += copy; | ||
1732 | } | ||
1733 | start = end; | ||
1734 | } | 1724 | } |
1725 | start = end; | ||
1735 | } | 1726 | } |
1736 | BUG_ON(len); | 1727 | BUG_ON(len); |
1737 | 1728 | ||
@@ -1746,6 +1737,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1746 | { | 1737 | { |
1747 | int start = skb_headlen(skb); | 1738 | int start = skb_headlen(skb); |
1748 | int i, copy = start - offset; | 1739 | int i, copy = start - offset; |
1740 | struct sk_buff *frag_iter; | ||
1749 | int pos = 0; | 1741 | int pos = 0; |
1750 | 1742 | ||
1751 | /* Copy header. */ | 1743 | /* Copy header. */ |
@@ -1790,31 +1782,27 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1790 | start = end; | 1782 | start = end; |
1791 | } | 1783 | } |
1792 | 1784 | ||
1793 | if (skb_shinfo(skb)->frag_list) { | 1785 | skb_walk_frags(skb, frag_iter) { |
1794 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1786 | __wsum csum2; |
1787 | int end; | ||
1795 | 1788 | ||
1796 | for (; list; list = list->next) { | 1789 | WARN_ON(start > offset + len); |
1797 | __wsum csum2; | 1790 | |
1798 | int end; | 1791 | end = start + frag_iter->len; |
1799 | 1792 | if ((copy = end - offset) > 0) { | |
1800 | WARN_ON(start > offset + len); | 1793 | if (copy > len) |
1801 | 1794 | copy = len; | |
1802 | end = start + list->len; | 1795 | csum2 = skb_copy_and_csum_bits(frag_iter, |
1803 | if ((copy = end - offset) > 0) { | 1796 | offset - start, |
1804 | if (copy > len) | 1797 | to, copy, 0); |
1805 | copy = len; | 1798 | csum = csum_block_add(csum, csum2, pos); |
1806 | csum2 = skb_copy_and_csum_bits(list, | 1799 | if ((len -= copy) == 0) |
1807 | offset - start, | 1800 | return csum; |
1808 | to, copy, 0); | 1801 | offset += copy; |
1809 | csum = csum_block_add(csum, csum2, pos); | 1802 | to += copy; |
1810 | if ((len -= copy) == 0) | 1803 | pos += copy; |
1811 | return csum; | ||
1812 | offset += copy; | ||
1813 | to += copy; | ||
1814 | pos += copy; | ||
1815 | } | ||
1816 | start = end; | ||
1817 | } | 1804 | } |
1805 | start = end; | ||
1818 | } | 1806 | } |
1819 | BUG_ON(len); | 1807 | BUG_ON(len); |
1820 | return csum; | 1808 | return csum; |
@@ -2324,8 +2312,7 @@ next_skb: | |||
2324 | st->frag_data = NULL; | 2312 | st->frag_data = NULL; |
2325 | } | 2313 | } |
2326 | 2314 | ||
2327 | if (st->root_skb == st->cur_skb && | 2315 | if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) { |
2328 | skb_shinfo(st->root_skb)->frag_list) { | ||
2329 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; | 2316 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; |
2330 | st->frag_idx = 0; | 2317 | st->frag_idx = 0; |
2331 | goto next_skb; | 2318 | goto next_skb; |
@@ -2636,7 +2623,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) | |||
2636 | } else | 2623 | } else |
2637 | skb_get(fskb2); | 2624 | skb_get(fskb2); |
2638 | 2625 | ||
2639 | BUG_ON(skb_shinfo(nskb)->frag_list); | 2626 | SKB_FRAG_ASSERT(nskb); |
2640 | skb_shinfo(nskb)->frag_list = fskb2; | 2627 | skb_shinfo(nskb)->frag_list = fskb2; |
2641 | } | 2628 | } |
2642 | 2629 | ||
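Note: SKB_FRAG_ASSERT(nskb) replaces the open-coded BUG_ON on frag_list. Presumably it is just a named wrapper around the same check, along the lines of:

```c
/* Assumed definition: assert the skb has no frag_list before attaching one,
 * matching the BUG_ON it replaces. */
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frags(skb))
```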
@@ -2661,30 +2648,40 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2661 | { | 2648 | { |
2662 | struct sk_buff *p = *head; | 2649 | struct sk_buff *p = *head; |
2663 | struct sk_buff *nskb; | 2650 | struct sk_buff *nskb; |
2651 | struct skb_shared_info *skbinfo = skb_shinfo(skb); | ||
2652 | struct skb_shared_info *pinfo = skb_shinfo(p); | ||
2664 | unsigned int headroom; | 2653 | unsigned int headroom; |
2665 | unsigned int len = skb_gro_len(skb); | 2654 | unsigned int len = skb_gro_len(skb); |
2655 | unsigned int offset = skb_gro_offset(skb); | ||
2656 | unsigned int headlen = skb_headlen(skb); | ||
2666 | 2657 | ||
2667 | if (p->len + len >= 65536) | 2658 | if (p->len + len >= 65536) |
2668 | return -E2BIG; | 2659 | return -E2BIG; |
2669 | 2660 | ||
2670 | if (skb_shinfo(p)->frag_list) | 2661 | if (pinfo->frag_list) |
2671 | goto merge; | 2662 | goto merge; |
2672 | else if (skb_headlen(skb) <= skb_gro_offset(skb)) { | 2663 | else if (headlen <= offset) { |
2673 | if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags > | 2664 | skb_frag_t *frag; |
2674 | MAX_SKB_FRAGS) | 2665 | skb_frag_t *frag2; |
2666 | int i = skbinfo->nr_frags; | ||
2667 | int nr_frags = pinfo->nr_frags + i; | ||
2668 | |||
2669 | offset -= headlen; | ||
2670 | |||
2671 | if (nr_frags > MAX_SKB_FRAGS) | ||
2675 | return -E2BIG; | 2672 | return -E2BIG; |
2676 | 2673 | ||
2677 | skb_shinfo(skb)->frags[0].page_offset += | 2674 | pinfo->nr_frags = nr_frags; |
2678 | skb_gro_offset(skb) - skb_headlen(skb); | 2675 | skbinfo->nr_frags = 0; |
2679 | skb_shinfo(skb)->frags[0].size -= | ||
2680 | skb_gro_offset(skb) - skb_headlen(skb); | ||
2681 | 2676 | ||
2682 | memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags, | 2677 | frag = pinfo->frags + nr_frags; |
2683 | skb_shinfo(skb)->frags, | 2678 | frag2 = skbinfo->frags + i; |
2684 | skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); | 2679 | do { |
2680 | *--frag = *--frag2; | ||
2681 | } while (--i); | ||
2685 | 2682 | ||
2686 | skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags; | 2683 | frag->page_offset += offset; |
2687 | skb_shinfo(skb)->nr_frags = 0; | 2684 | frag->size -= offset; |
2688 | 2685 | ||
2689 | skb->truesize -= skb->data_len; | 2686 | skb->truesize -= skb->data_len; |
2690 | skb->len -= skb->data_len; | 2687 | skb->len -= skb->data_len; |
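Note: the reworked merge path caches the skb_shinfo pointers and GRO offsets up front, appends skb's page frags after p's with a reverse copy, and then trims the first appended frag by `offset - headlen` (the part of skb's data GRO has already consumed beyond the linear head). The pointer arithmetic is easier to see outside kernel context; below is a hypothetical standalone demo of the same idiom, with all names invented for illustration:

```c
#include <stdio.h>

struct frag { int page_offset, size; };

int main(void)
{
	struct frag pfrags[8] = { {0, 100}, {0, 200} };	/* p already holds 2 frags */
	struct frag sfrags[8] = { {0, 300}, {0, 400} };	/* skb contributes 2 more  */
	int p_nr = 2, i = 2;		/* i = skb's frag count */
	int offset = 10;		/* gro offset minus headlen, already consumed */

	struct frag *dst = pfrags + p_nr + i;	/* one past the new last slot */
	struct frag *src = sfrags + i;
	do {
		*--dst = *--src;	/* copy back to front, as in the kernel loop */
	} while (--i);

	/* dst now points at the first appended frag; trim the consumed bytes. */
	dst->page_offset += offset;
	dst->size -= offset;

	for (i = 0; i < 4; i++)
		printf("frag %d: off=%d size=%d\n",
		       i, pfrags[i].page_offset, pfrags[i].size);
	return 0;
}
```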
@@ -2715,7 +2712,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2715 | 2712 | ||
2716 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); | 2713 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); |
2717 | skb_shinfo(nskb)->frag_list = p; | 2714 | skb_shinfo(nskb)->frag_list = p; |
2718 | skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size; | 2715 | skb_shinfo(nskb)->gso_size = pinfo->gso_size; |
2719 | skb_header_release(p); | 2716 | skb_header_release(p); |
2720 | nskb->prev = p; | 2717 | nskb->prev = p; |
2721 | 2718 | ||
@@ -2730,16 +2727,13 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) | |||
2730 | p = nskb; | 2727 | p = nskb; |
2731 | 2728 | ||
2732 | merge: | 2729 | merge: |
2733 | if (skb_gro_offset(skb) > skb_headlen(skb)) { | 2730 | if (offset > headlen) { |
2734 | skb_shinfo(skb)->frags[0].page_offset += | 2731 | skbinfo->frags[0].page_offset += offset - headlen; |
2735 | skb_gro_offset(skb) - skb_headlen(skb); | 2732 | skbinfo->frags[0].size -= offset - headlen; |
2736 | skb_shinfo(skb)->frags[0].size -= | 2733 | offset = headlen; |
2737 | skb_gro_offset(skb) - skb_headlen(skb); | ||
2738 | skb_gro_reset_offset(skb); | ||
2739 | skb_gro_pull(skb, skb_headlen(skb)); | ||
2740 | } | 2734 | } |
2741 | 2735 | ||
2742 | __skb_pull(skb, skb_gro_offset(skb)); | 2736 | __skb_pull(skb, offset); |
2743 | 2737 | ||
2744 | p->prev->next = skb; | 2738 | p->prev->next = skb; |
2745 | p->prev = skb; | 2739 | p->prev = skb; |
@@ -2786,6 +2780,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
2786 | { | 2780 | { |
2787 | int start = skb_headlen(skb); | 2781 | int start = skb_headlen(skb); |
2788 | int i, copy = start - offset; | 2782 | int i, copy = start - offset; |
2783 | struct sk_buff *frag_iter; | ||
2789 | int elt = 0; | 2784 | int elt = 0; |
2790 | 2785 | ||
2791 | if (copy > 0) { | 2786 | if (copy > 0) { |
@@ -2819,26 +2814,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
2819 | start = end; | 2814 | start = end; |
2820 | } | 2815 | } |
2821 | 2816 | ||
2822 | if (skb_shinfo(skb)->frag_list) { | 2817 | skb_walk_frags(skb, frag_iter) { |
2823 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 2818 | int end; |
2824 | |||
2825 | for (; list; list = list->next) { | ||
2826 | int end; | ||
2827 | 2819 | ||
2828 | WARN_ON(start > offset + len); | 2820 | WARN_ON(start > offset + len); |
2829 | 2821 | ||
2830 | end = start + list->len; | 2822 | end = start + frag_iter->len; |
2831 | if ((copy = end - offset) > 0) { | 2823 | if ((copy = end - offset) > 0) { |
2832 | if (copy > len) | 2824 | if (copy > len) |
2833 | copy = len; | 2825 | copy = len; |
2834 | elt += __skb_to_sgvec(list, sg+elt, offset - start, | 2826 | elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
2835 | copy); | 2827 | copy); |
2836 | if ((len -= copy) == 0) | 2828 | if ((len -= copy) == 0) |
2837 | return elt; | 2829 | return elt; |
2838 | offset += copy; | 2830 | offset += copy; |
2839 | } | ||
2840 | start = end; | ||
2841 | } | 2831 | } |
2832 | start = end; | ||
2842 | } | 2833 | } |
2843 | BUG_ON(len); | 2834 | BUG_ON(len); |
2844 | return elt; | 2835 | return elt; |
@@ -2886,7 +2877,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | |||
2886 | return -ENOMEM; | 2877 | return -ENOMEM; |
2887 | 2878 | ||
2888 | /* Easy case. Most of packets will go this way. */ | 2879 | /* Easy case. Most of packets will go this way. */ |
2889 | if (!skb_shinfo(skb)->frag_list) { | 2880 | if (!skb_has_frags(skb)) { |
2890 | /* A little of trouble, not enough of space for trailer. | 2881 | /* A little of trouble, not enough of space for trailer. |
2891 | * This should not happen, when stack is tuned to generate | 2882 | * This should not happen, when stack is tuned to generate |
2892 | * good frames. OK, on miss we reallocate and reserve even more | 2883 | * good frames. OK, on miss we reallocate and reserve even more |
@@ -2921,7 +2912,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | |||
2921 | 2912 | ||
2922 | if (skb1->next == NULL && tailbits) { | 2913 | if (skb1->next == NULL && tailbits) { |
2923 | if (skb_shinfo(skb1)->nr_frags || | 2914 | if (skb_shinfo(skb1)->nr_frags || |
2924 | skb_shinfo(skb1)->frag_list || | 2915 | skb_has_frags(skb1) || |
2925 | skb_tailroom(skb1) < tailbits) | 2916 | skb_tailroom(skb1) < tailbits) |
2926 | ntail = tailbits + 128; | 2917 | ntail = tailbits + 128; |
2927 | } | 2918 | } |
@@ -2930,7 +2921,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | |||
2930 | skb_cloned(skb1) || | 2921 | skb_cloned(skb1) || |
2931 | ntail || | 2922 | ntail || |
2932 | skb_shinfo(skb1)->nr_frags || | 2923 | skb_shinfo(skb1)->nr_frags || |
2933 | skb_shinfo(skb1)->frag_list) { | 2924 | skb_has_frags(skb1)) { |
2934 | struct sk_buff *skb2; | 2925 | struct sk_buff *skb2; |
2935 | 2926 | ||
2936 | /* Fuck, we are miserable poor guys... */ | 2927 | /* Fuck, we are miserable poor guys... */ |
@@ -3016,12 +3007,12 @@ EXPORT_SYMBOL_GPL(skb_tstamp_tx); | |||
3016 | */ | 3007 | */ |
3017 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) | 3008 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) |
3018 | { | 3009 | { |
3019 | if (unlikely(start > skb->len - 2) || | 3010 | if (unlikely(start > skb_headlen(skb)) || |
3020 | unlikely((int)start + off > skb->len - 2)) { | 3011 | unlikely((int)start + off > skb_headlen(skb) - 2)) { |
3021 | if (net_ratelimit()) | 3012 | if (net_ratelimit()) |
3022 | printk(KERN_WARNING | 3013 | printk(KERN_WARNING |
3023 | "bad partial csum: csum=%u/%u len=%u\n", | 3014 | "bad partial csum: csum=%u/%u len=%u\n", |
3024 | start, off, skb->len); | 3015 | start, off, skb_headlen(skb)); |
3025 | return false; | 3016 | return false; |
3026 | } | 3017 | } |
3027 | skb->ip_summed = CHECKSUM_PARTIAL; | 3018 | skb->ip_summed = CHECKSUM_PARTIAL; |
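Note: the bounds check in skb_partial_csum_set() now tests against skb_headlen() instead of skb->len: csum_start and csum_offset describe positions in the linear header area, so a start or offset that would land in paged data has to be rejected even when it fits within the total packet length. For reference, skb_headlen() covers only the linear portion:

```c
/* Length of the linear (non-paged) data in the skb. */
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}
```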