aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/skbuff.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--net/core/skbuff.c166
1 files changed, 76 insertions, 90 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 90b96a11b974..30c7d35dd862 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3300,6 +3300,32 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3300 return elt; 3300 return elt;
3301} 3301}
3302 3302
3303/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps skb to the given
3304 * sglist without marking the sg which contains the last skb data as the end.
3305 * So the caller can manipulate the sg list at will when padding new data after
3306 * the first call without calling sg_unmark_end to expand the sg list.
3307 *
3308 * Scenario to use skb_to_sgvec_nomark:
3309 * 1. sg_init_table
3310 * 2. skb_to_sgvec_nomark(payload1)
3311 * 3. skb_to_sgvec_nomark(payload2)
3312 *
3313 * This is equivalent to:
3314 * 1. sg_init_table
3315 * 2. skb_to_sgvec(payload1)
3316 * 3. sg_unmark_end
3317 * 4. skb_to_sgvec(payload2)
3318 *
3319 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
3320 * is preferable.
3321 */
3322int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
3323 int offset, int len)
3324{
3325 return __skb_to_sgvec(skb, sg, offset, len);
3326}
3327EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
3328
3303int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3329int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3304{ 3330{
3305 int nsg = __skb_to_sgvec(skb, sg, offset, len); 3331 int nsg = __skb_to_sgvec(skb, sg, offset, len);
@@ -3562,15 +3588,47 @@ static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
3562 return 0; 3588 return 0;
3563} 3589}
3564 3590
3591#define MAX_TCP_HDR_LEN (15 * 4)
3592
3593static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
3594 typeof(IPPROTO_IP) proto,
3595 unsigned int off)
3596{
3597 switch (proto) {
3598 int err;
3599
3600 case IPPROTO_TCP:
3601 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
3602 off + MAX_TCP_HDR_LEN);
3603 if (!err && !skb_partial_csum_set(skb, off,
3604 offsetof(struct tcphdr,
3605 check)))
3606 err = -EPROTO;
3607 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
3608
3609 case IPPROTO_UDP:
3610 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
3611 off + sizeof(struct udphdr));
3612 if (!err && !skb_partial_csum_set(skb, off,
3613 offsetof(struct udphdr,
3614 check)))
3615 err = -EPROTO;
3616 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
3617 }
3618
3619 return ERR_PTR(-EPROTO);
3620}
3621
3565/* This value should be large enough to cover a tagged ethernet header plus 3622/* This value should be large enough to cover a tagged ethernet header plus
3566 * maximally sized IP and TCP or UDP headers. 3623 * maximally sized IP and TCP or UDP headers.
3567 */ 3624 */
3568#define MAX_IP_HDR_LEN 128 3625#define MAX_IP_HDR_LEN 128
3569 3626
3570static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate) 3627static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
3571{ 3628{
3572 unsigned int off; 3629 unsigned int off;
3573 bool fragment; 3630 bool fragment;
3631 __sum16 *csum;
3574 int err; 3632 int err;
3575 3633
3576 fragment = false; 3634 fragment = false;
@@ -3591,51 +3649,15 @@ static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
3591 if (fragment) 3649 if (fragment)
3592 goto out; 3650 goto out;
3593 3651
3594 switch (ip_hdr(skb)->protocol) { 3652 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
3595 case IPPROTO_TCP: 3653 if (IS_ERR(csum))
3596 err = skb_maybe_pull_tail(skb, 3654 return PTR_ERR(csum);
3597 off + sizeof(struct tcphdr),
3598 MAX_IP_HDR_LEN);
3599 if (err < 0)
3600 goto out;
3601
3602 if (!skb_partial_csum_set(skb, off,
3603 offsetof(struct tcphdr, check))) {
3604 err = -EPROTO;
3605 goto out;
3606 }
3607
3608 if (recalculate)
3609 tcp_hdr(skb)->check =
3610 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3611 ip_hdr(skb)->daddr,
3612 skb->len - off,
3613 IPPROTO_TCP, 0);
3614 break;
3615 case IPPROTO_UDP:
3616 err = skb_maybe_pull_tail(skb,
3617 off + sizeof(struct udphdr),
3618 MAX_IP_HDR_LEN);
3619 if (err < 0)
3620 goto out;
3621
3622 if (!skb_partial_csum_set(skb, off,
3623 offsetof(struct udphdr, check))) {
3624 err = -EPROTO;
3625 goto out;
3626 }
3627
3628 if (recalculate)
3629 udp_hdr(skb)->check =
3630 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3631 ip_hdr(skb)->daddr,
3632 skb->len - off,
3633 IPPROTO_UDP, 0);
3634 break;
3635 default:
3636 goto out;
3637 }
3638 3655
3656 if (recalculate)
3657 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3658 ip_hdr(skb)->daddr,
3659 skb->len - off,
3660 ip_hdr(skb)->protocol, 0);
3639 err = 0; 3661 err = 0;
3640 3662
3641out: 3663out:
@@ -3658,6 +3680,7 @@ static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
3658 unsigned int len; 3680 unsigned int len;
3659 bool fragment; 3681 bool fragment;
3660 bool done; 3682 bool done;
3683 __sum16 *csum;
3661 3684
3662 fragment = false; 3685 fragment = false;
3663 done = false; 3686 done = false;
@@ -3735,51 +3758,14 @@ static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
3735 if (!done || fragment) 3758 if (!done || fragment)
3736 goto out; 3759 goto out;
3737 3760
3738 switch (nexthdr) { 3761 csum = skb_checksum_setup_ip(skb, nexthdr, off);
3739 case IPPROTO_TCP: 3762 if (IS_ERR(csum))
3740 err = skb_maybe_pull_tail(skb, 3763 return PTR_ERR(csum);
3741 off + sizeof(struct tcphdr),
3742 MAX_IPV6_HDR_LEN);
3743 if (err < 0)
3744 goto out;
3745
3746 if (!skb_partial_csum_set(skb, off,
3747 offsetof(struct tcphdr, check))) {
3748 err = -EPROTO;
3749 goto out;
3750 }
3751
3752 if (recalculate)
3753 tcp_hdr(skb)->check =
3754 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3755 &ipv6_hdr(skb)->daddr,
3756 skb->len - off,
3757 IPPROTO_TCP, 0);
3758 break;
3759 case IPPROTO_UDP:
3760 err = skb_maybe_pull_tail(skb,
3761 off + sizeof(struct udphdr),
3762 MAX_IPV6_HDR_LEN);
3763 if (err < 0)
3764 goto out;
3765
3766 if (!skb_partial_csum_set(skb, off,
3767 offsetof(struct udphdr, check))) {
3768 err = -EPROTO;
3769 goto out;
3770 }
3771
3772 if (recalculate)
3773 udp_hdr(skb)->check =
3774 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3775 &ipv6_hdr(skb)->daddr,
3776 skb->len - off,
3777 IPPROTO_UDP, 0);
3778 break;
3779 default:
3780 goto out;
3781 }
3782 3764
3765 if (recalculate)
3766 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3767 &ipv6_hdr(skb)->daddr,
3768 skb->len - off, nexthdr, 0);
3783 err = 0; 3769 err = 0;
3784 3770
3785out: 3771out:
@@ -3797,7 +3783,7 @@ int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
3797 3783
3798 switch (skb->protocol) { 3784 switch (skb->protocol) {
3799 case htons(ETH_P_IP): 3785 case htons(ETH_P_IP):
3800 err = skb_checksum_setup_ip(skb, recalculate); 3786 err = skb_checksum_setup_ipv4(skb, recalculate);
3801 break; 3787 break;
3802 3788
3803 case htons(ETH_P_IPV6): 3789 case htons(ETH_P_IPV6):