author    Daniel Borkmann <dborkman@redhat.com>    2013-06-07 01:11:46 -0400
committer David S. Miller <davem@davemloft.net>    2013-06-07 17:39:05 -0400
commit    28850dc7c71da9d0c0e39246e9ff6913f41f8d0a (patch)
tree      1ff43aef62041c487d4175af44e73da90bb70ce9 /net/ipv4/tcp.c
parent    5ee98591577aa63dbb9e78a0d142abc86b9063d0 (diff)
net: tcp: move GRO/GSO functions to tcp_offload
Would be good to make things explicit and move those functions to a new file
called tcp_offload.c, thus making it similar to tcpv6_offload.c. While moving
all related functions into tcp_offload.c, we can also make some of them
static, since they are only used there. Also, add an explicit registration
function.

Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
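The "explicit registration function" mentioned above hooks the moved callbacks
into the inet offload table. A minimal sketch of what the new
net/ipv4/tcp_offload.c side might look like, assuming the struct net_offload /
inet_add_offload infrastructure of kernels from this era; tcp_v4_gso_send_check,
tcp4_gro_receive and tcp4_gro_complete are the IPv4 helpers expected to move
along with the functions deleted below, so treat the exact names as
illustrative rather than a verbatim copy of the commit:

static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_send_check = tcp_v4_gso_send_check,
                .gso_segment    = tcp_tso_segment,   /* deleted from tcp.c below */
                .gro_receive    = tcp4_gro_receive,  /* IPv4 wrapper for tcp_gro_receive() */
                .gro_complete   = tcp4_gro_complete, /* IPv4 wrapper for tcp_gro_complete() */
        },
};

int __init tcpv4_offload_init(void)
{
        /* Register the TCP offload callbacks for IPPROTO_TCP. */
        return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}

A caller such as inet_init() would then invoke tcpv4_offload_init() once at
boot and report a failure if the registration does not succeed.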
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 241 ----------------------------------------
1 file changed, 0 insertions(+), 241 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6a1cf95abc98..bc4246940f6c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2877,247 +2877,6 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
-                                netdev_features_t features)
-{
-        struct sk_buff *segs = ERR_PTR(-EINVAL);
-        struct tcphdr *th;
-        unsigned int thlen;
-        unsigned int seq;
-        __be32 delta;
-        unsigned int oldlen;
-        unsigned int mss;
-        struct sk_buff *gso_skb = skb;
-        __sum16 newcheck;
-        bool ooo_okay, copy_destructor;
-
-        if (!pskb_may_pull(skb, sizeof(*th)))
-                goto out;
-
-        th = tcp_hdr(skb);
-        thlen = th->doff * 4;
-        if (thlen < sizeof(*th))
-                goto out;
-
-        if (!pskb_may_pull(skb, thlen))
-                goto out;
-
-        oldlen = (u16)~skb->len;
-        __skb_pull(skb, thlen);
-
-        mss = tcp_skb_mss(skb);
-        if (unlikely(skb->len <= mss))
-                goto out;
-
-        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
-                /* Packet is from an untrusted source, reset gso_segs. */
-                int type = skb_shinfo(skb)->gso_type;
-
-                if (unlikely(type &
-                             ~(SKB_GSO_TCPV4 |
-                               SKB_GSO_DODGY |
-                               SKB_GSO_TCP_ECN |
-                               SKB_GSO_TCPV6 |
-                               SKB_GSO_GRE |
-                               SKB_GSO_MPLS |
-                               SKB_GSO_UDP_TUNNEL |
-                               0) ||
-                             !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
-                        goto out;
-
-                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
-
-                segs = NULL;
-                goto out;
-        }
-
-        copy_destructor = gso_skb->destructor == tcp_wfree;
-        ooo_okay = gso_skb->ooo_okay;
-        /* All segments but the first should have ooo_okay cleared */
-        skb->ooo_okay = 0;
-
-        segs = skb_segment(skb, features);
-        if (IS_ERR(segs))
-                goto out;
-
-        /* Only first segment might have ooo_okay set */
-        segs->ooo_okay = ooo_okay;
-
-        delta = htonl(oldlen + (thlen + mss));
-
-        skb = segs;
-        th = tcp_hdr(skb);
-        seq = ntohl(th->seq);
-
-        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
-                                               (__force u32)delta));
-
-        do {
-                th->fin = th->psh = 0;
-                th->check = newcheck;
-
-                if (skb->ip_summed != CHECKSUM_PARTIAL)
-                        th->check =
-                             csum_fold(csum_partial(skb_transport_header(skb),
-                                                    thlen, skb->csum));
-
-                seq += mss;
-                if (copy_destructor) {
-                        skb->destructor = gso_skb->destructor;
-                        skb->sk = gso_skb->sk;
-                        /* {tcp|sock}_wfree() use exact truesize accounting :
-                         * sum(skb->truesize) MUST be exactly be gso_skb->truesize
-                         * So we account mss bytes of 'true size' for each segment.
-                         * The last segment will contain the remaining.
-                         */
-                        skb->truesize = mss;
-                        gso_skb->truesize -= mss;
-                }
-                skb = skb->next;
-                th = tcp_hdr(skb);
-
-                th->seq = htonl(seq);
-                th->cwr = 0;
-        } while (skb->next);
-
-        /* Following permits TCP Small Queues to work well with GSO :
-         * The callback to TCP stack will be called at the time last frag
-         * is freed at TX completion, and not right now when gso_skb
-         * is freed by GSO engine
-         */
-        if (copy_destructor) {
-                swap(gso_skb->sk, skb->sk);
-                swap(gso_skb->destructor, skb->destructor);
-                swap(gso_skb->truesize, skb->truesize);
-        }
-
-        delta = htonl(oldlen + (skb_tail_pointer(skb) -
-                                skb_transport_header(skb)) +
-                      skb->data_len);
-        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
-                                (__force u32)delta));
-        if (skb->ip_summed != CHECKSUM_PARTIAL)
-                th->check = csum_fold(csum_partial(skb_transport_header(skb),
-                                                   thlen, skb->csum));
-
-out:
-        return segs;
-}
-EXPORT_SYMBOL(tcp_tso_segment);
-
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
-{
-        struct sk_buff **pp = NULL;
-        struct sk_buff *p;
-        struct tcphdr *th;
-        struct tcphdr *th2;
-        unsigned int len;
-        unsigned int thlen;
-        __be32 flags;
-        unsigned int mss = 1;
-        unsigned int hlen;
-        unsigned int off;
-        int flush = 1;
-        int i;
-
-        off = skb_gro_offset(skb);
-        hlen = off + sizeof(*th);
-        th = skb_gro_header_fast(skb, off);
-        if (skb_gro_header_hard(skb, hlen)) {
-                th = skb_gro_header_slow(skb, hlen, off);
-                if (unlikely(!th))
-                        goto out;
-        }
-
-        thlen = th->doff * 4;
-        if (thlen < sizeof(*th))
-                goto out;
-
-        hlen = off + thlen;
-        if (skb_gro_header_hard(skb, hlen)) {
-                th = skb_gro_header_slow(skb, hlen, off);
-                if (unlikely(!th))
-                        goto out;
-        }
-
-        skb_gro_pull(skb, thlen);
-
-        len = skb_gro_len(skb);
-        flags = tcp_flag_word(th);
-
-        for (; (p = *head); head = &p->next) {
-                if (!NAPI_GRO_CB(p)->same_flow)
-                        continue;
-
-                th2 = tcp_hdr(p);
-
-                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
-                        NAPI_GRO_CB(p)->same_flow = 0;
-                        continue;
-                }
-
-                goto found;
-        }
-
-        goto out_check_final;
-
-found:
-        flush = NAPI_GRO_CB(p)->flush;
-        flush |= (__force int)(flags & TCP_FLAG_CWR);
-        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
-                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
-        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
-        for (i = sizeof(*th); i < thlen; i += 4)
-                flush |= *(u32 *)((u8 *)th + i) ^
-                         *(u32 *)((u8 *)th2 + i);
-
-        mss = tcp_skb_mss(p);
-
-        flush |= (len - 1) >= mss;
-        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
-
-        if (flush || skb_gro_receive(head, skb)) {
-                mss = 1;
-                goto out_check_final;
-        }
-
-        p = *head;
-        th2 = tcp_hdr(p);
-        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
-
-out_check_final:
-        flush = len < mss;
-        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
-                                        TCP_FLAG_RST | TCP_FLAG_SYN |
-                                        TCP_FLAG_FIN));
-
-        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
-                pp = head;
-
-out:
-        NAPI_GRO_CB(skb)->flush |= flush;
-
-        return pp;
-}
-EXPORT_SYMBOL(tcp_gro_receive);
-
-int tcp_gro_complete(struct sk_buff *skb)
-{
-        struct tcphdr *th = tcp_hdr(skb);
-
-        skb->csum_start = skb_transport_header(skb) - skb->head;
-        skb->csum_offset = offsetof(struct tcphdr, check);
-        skb->ip_summed = CHECKSUM_PARTIAL;
-
-        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
-
-        if (th->cwr)
-                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
-        return 0;
-}
-EXPORT_SYMBOL(tcp_gro_complete);
-
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
 static DEFINE_MUTEX(tcp_md5sig_mutex);
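
The deleted tcp_tso_segment() adjusts each segment's checksum incrementally
rather than re-summing the payload: oldlen caches the ones'-complement of the
original length, and delta = htonl(oldlen + (thlen + mss)) folds the
per-segment replacement length into the existing th->check, in the style of
RFC 1624; only the last segment, whose length may differ, gets its delta
recomputed from skb_tail_pointer(). A self-contained userspace sketch of that
ones'-complement update (csum16_add() is a hypothetical helper for
illustration, not a kernel API):

#include <stdint.h>
#include <stdio.h>

/* Add two 16-bit values in ones'-complement arithmetic (end-around carry). */
static uint16_t csum16_add(uint16_t sum, uint16_t addend)
{
        uint32_t s = (uint32_t)sum + addend;
        return (uint16_t)(s + (s >> 16));
}

int main(void)
{
        uint16_t check = 0x1c46;   /* example folded TCP checksum */
        uint16_t old_len = 4000;   /* length covered by the old checksum */
        uint16_t new_len = 1460;   /* one MSS-sized segment */

        /* RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m') */
        uint16_t sum = (uint16_t)~check;
        sum = csum16_add(sum, (uint16_t)~old_len);
        sum = csum16_add(sum, new_len);
        check = (uint16_t)~sum;

        printf("updated checksum: 0x%04x\n", check);
        return 0;
}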