author     Paul Durrant <Paul.Durrant@citrix.com>    2014-01-09 05:02:46 -0500
committer  David S. Miller <davem@davemloft.net>     2014-01-14 17:24:19 -0500
commit     ed1f50c3a7c1ad1b1b4d584308eab77d57a330f8 (patch)
tree       81bf10db834db5e50a915548acf87c492688ec0e
parent     b86f81cca9442ce6cfbe76d10fb8d2c61122ae12 (diff)
net: add skb_checksum_setup
This patch adds a function to set up the partial checksum offset for IP
packets (and optionally re-calculate the pseudo-header checksum) into the
core network code. The implementation was previously private and duplicated
between xen-netback and xen-netfront; however, it is not xen-specific and is
potentially useful to any network driver.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: David Miller <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Veaceslav Falico <vfalico@redhat.com>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/skbuff.h    2
-rw-r--r--  net/core/skbuff.c       273
2 files changed, 275 insertions, 0 deletions
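For reference, a minimal sketch of how a driver might consume the new helper. It is not part of this patch; the function name example_rx_checksum_fixup, the csum_blank parameter, and the drop-on-error policy are illustrative assumptions. Passing true as the second argument asks skb_checksum_setup() to rewrite the TCP/UDP pseudo-header checksum in addition to setting up the partial checksum offset.

    #include <linux/skbuff.h>

    /* Hypothetical receive-path fixup: complete the checksum state of an
     * skb whose transport checksum was left blank by the sender (e.g. a
     * paravirtual frontend). On error the packet is malformed or uses an
     * unsupported protocol, so it is dropped.
     */
    static int example_rx_checksum_fixup(struct sk_buff *skb, bool csum_blank)
    {
    	if (csum_blank) {
    		int err = skb_checksum_setup(skb, true);

    		if (unlikely(err)) {
    			kfree_skb(skb);
    			return err;
    		}
    	}

    	return 0;
    }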
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d97f2d07d02b..48b760505cb6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2893,6 +2893,8 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
+int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
+
 u32 __skb_get_poff(const struct sk_buff *skb);
 
 /**
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1d641e781f85..15057d29b010 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -65,6 +65,7 @@
 #include <net/dst.h>
 #include <net/sock.h>
 #include <net/checksum.h>
+#include <net/ip6_checksum.h>
 #include <net/xfrm.h>
 
 #include <asm/uaccess.h>
@@ -3549,6 +3550,278 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
 }
 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
 
+static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+			       unsigned int max)
+{
+	if (skb_headlen(skb) >= len)
+		return 0;
+
+	/* If we need to pullup then pullup to the max, so we
+	 * won't need to do it again.
+	 */
+	if (max > skb->len)
+		max = skb->len;
+
+	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+		return -ENOMEM;
+
+	if (skb_headlen(skb) < len)
+		return -EPROTO;
+
+	return 0;
+}
+
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
+static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
+{
+	unsigned int off;
+	bool fragment;
+	int err;
+
+	fragment = false;
+
+	err = skb_maybe_pull_tail(skb,
+				  sizeof(struct iphdr),
+				  MAX_IP_HDR_LEN);
+	if (err < 0)
+		goto out;
+
+	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+		fragment = true;
+
+	off = ip_hdrlen(skb);
+
+	err = -EPROTO;
+
+	if (fragment)
+		goto out;
+
+	switch (ip_hdr(skb)->protocol) {
+	case IPPROTO_TCP:
+		err = skb_maybe_pull_tail(skb,
+					  off + sizeof(struct tcphdr),
+					  MAX_IP_HDR_LEN);
+		if (err < 0)
+			goto out;
+
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct tcphdr, check))) {
+			err = -EPROTO;
+			goto out;
+		}
+
+		if (recalculate)
+			tcp_hdr(skb)->check =
+				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   skb->len - off,
+						   IPPROTO_TCP, 0);
+		break;
+	case IPPROTO_UDP:
+		err = skb_maybe_pull_tail(skb,
+					  off + sizeof(struct udphdr),
+					  MAX_IP_HDR_LEN);
+		if (err < 0)
+			goto out;
+
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct udphdr, check))) {
+			err = -EPROTO;
+			goto out;
+		}
+
+		if (recalculate)
+			udp_hdr(skb)->check =
+				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						   ip_hdr(skb)->daddr,
+						   skb->len - off,
+						   IPPROTO_UDP, 0);
+		break;
+	default:
+		goto out;
+	}
+
+	err = 0;
+
+out:
+	return err;
+}
+
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+	(type *)(skb_network_header(skb) + (off))
+
+static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
+{
+	int err;
+	u8 nexthdr;
+	unsigned int off;
+	unsigned int len;
+	bool fragment;
+	bool done;
+
+	fragment = false;
+	done = false;
+
+	off = sizeof(struct ipv6hdr);
+
+	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+	if (err < 0)
+		goto out;
+
+	nexthdr = ipv6_hdr(skb)->nexthdr;
+
+	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+	while (off <= len && !done) {
+		switch (nexthdr) {
+		case IPPROTO_DSTOPTS:
+		case IPPROTO_HOPOPTS:
+		case IPPROTO_ROUTING: {
+			struct ipv6_opt_hdr *hp;
+
+			err = skb_maybe_pull_tail(skb,
+						  off +
+						  sizeof(struct ipv6_opt_hdr),
+						  MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+			nexthdr = hp->nexthdr;
+			off += ipv6_optlen(hp);
+			break;
+		}
+		case IPPROTO_AH: {
+			struct ip_auth_hdr *hp;
+
+			err = skb_maybe_pull_tail(skb,
+						  off +
+						  sizeof(struct ip_auth_hdr),
+						  MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
+			nexthdr = hp->nexthdr;
+			off += ipv6_authlen(hp);
+			break;
+		}
+		case IPPROTO_FRAGMENT: {
+			struct frag_hdr *hp;
+
+			err = skb_maybe_pull_tail(skb,
+						  off +
+						  sizeof(struct frag_hdr),
+						  MAX_IPV6_HDR_LEN);
+			if (err < 0)
+				goto out;
+
+			hp = OPT_HDR(struct frag_hdr, skb, off);
+
+			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+				fragment = true;
+
+			nexthdr = hp->nexthdr;
+			off += sizeof(struct frag_hdr);
+			break;
+		}
+		default:
+			done = true;
+			break;
+		}
+	}
+
+	err = -EPROTO;
+
+	if (!done || fragment)
+		goto out;
+
+	switch (nexthdr) {
+	case IPPROTO_TCP:
+		err = skb_maybe_pull_tail(skb,
+					  off + sizeof(struct tcphdr),
+					  MAX_IPV6_HDR_LEN);
+		if (err < 0)
+			goto out;
+
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct tcphdr, check))) {
+			err = -EPROTO;
+			goto out;
+		}
+
+		if (recalculate)
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 skb->len - off,
+						 IPPROTO_TCP, 0);
+		break;
+	case IPPROTO_UDP:
+		err = skb_maybe_pull_tail(skb,
+					  off + sizeof(struct udphdr),
+					  MAX_IPV6_HDR_LEN);
+		if (err < 0)
+			goto out;
+
+		if (!skb_partial_csum_set(skb, off,
+					  offsetof(struct udphdr, check))) {
+			err = -EPROTO;
+			goto out;
+		}
+
+		if (recalculate)
+			udp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 skb->len - off,
+						 IPPROTO_UDP, 0);
+		break;
+	default:
+		goto out;
+	}
+
+	err = 0;
+
+out:
+	return err;
+}
+
+/**
+ * skb_checksum_setup - set up partial checksum offset
+ * @skb: the skb to set up
+ * @recalculate: if true the pseudo-header checksum will be recalculated
+ */
+int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
+{
+	int err;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		err = skb_checksum_setup_ip(skb, recalculate);
+		break;
+
+	case htons(ETH_P_IPV6):
+		err = skb_checksum_setup_ipv6(skb, recalculate);
+		break;
+
+	default:
+		err = -EPROTO;
+		break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(skb_checksum_setup);
+
 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 {
 	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",