Diffstat (limited to 'net/ipv4/tcp.c')
 -rw-r--r--  net/ipv4/tcp.c | 97
 1 file changed, 46 insertions(+), 51 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46febcacb729..34f5db1e1c8b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -374,7 +374,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
 	unsigned int mask;
 	struct sock *sk = sock->sk;
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	sock_poll_wait(file, sk_sleep(sk), wait);
 	if (sk->sk_state == TCP_LISTEN)
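The const-ification in this hunk (and in the ones that follow) costs nothing at runtime: tcp_sk() is only a cast, and once it accepts a const struct sock *, a read-only caller can hold a const struct tcp_sock * with no other changes. A minimal sketch of the accessor as this series assumes it (the real definition lives in include/linux/tcp.h):

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}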
@@ -524,11 +524,11 @@ EXPORT_SYMBOL(tcp_ioctl);
 
 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
 	tp->pushed_seq = tp->write_seq;
 }
 
-static inline int forced_push(struct tcp_sock *tp)
+static inline int forced_push(const struct tcp_sock *tp)
 {
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
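For context, forced_push() fires once more than half of the largest window the peer has ever advertised sits unpushed, which bounds how long queued data can wait for a PSH. A user-space model of the check, assuming after(a, b) is the kernel's wrap-safe sequence comparison, i.e. (s32)(a - b) > 0:

#include <stdint.h>
#include <stdio.h>

static int after(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) > 0;	/* wrap-safe "seq1 > seq2" */
}

static int forced_push(uint32_t write_seq, uint32_t pushed_seq,
		       uint32_t max_window)
{
	return after(write_seq, pushed_seq + (max_window >> 1));
}

int main(void)
{
	/* with a 64 KB max window, 40000 queued bytes force a push ... */
	printf("%d\n", forced_push(41000, 1000, 65535));	/* prints 1 */
	/* ... but 20000 do not */
	printf("%d\n", forced_push(21000, 1000, 65535));	/* prints 0 */
	return 0;
}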
@@ -540,7 +540,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 
 	skb->csum = 0;
 	tcb->seq = tcb->end_seq = tp->write_seq;
-	tcb->flags = TCPHDR_ACK;
+	tcb->tcp_flags = TCPHDR_ACK;
 	tcb->sacked = 0;
 	skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
@@ -813,7 +813,7 @@ new_segment:
 			goto wait_for_memory;
 
 		if (can_coalesce) {
-			skb_shinfo(skb)->frags[i - 1].size += copy;
+			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 		} else {
 			get_page(page);
 			skb_fill_page_desc(skb, i, page, offset, copy);
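This hunk and its twin in tcp_sendmsg() further down stop touching frags[i].size directly and go through the new frag-size accessors, so callers keep working if the fragment representation ever changes. A sketch of the accessors this diff relies on, paraphrasing their introduction in include/linux/skbuff.h:

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}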
@@ -830,7 +830,7 @@ new_segment:
 		skb_shinfo(skb)->gso_segs = 0;
 
 		if (!copied)
-			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
+			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
 
 		copied += copy;
 		poffset += copy;
@@ -891,9 +891,9 @@ EXPORT_SYMBOL(tcp_sendpage);
 #define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
 #define TCP_OFF(sk)	(sk->sk_sndmsg_off)
 
-static inline int select_size(struct sock *sk, int sg)
+static inline int select_size(const struct sock *sk, int sg)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	int tmp = tp->mss_cache;
 
 	if (sg) {
@@ -1058,8 +1058,7 @@ new_segment:
 
 				/* Update the skb. */
 				if (merge) {
-					skb_shinfo(skb)->frags[i - 1].size +=
-									copy;
+					skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 				} else {
 					skb_fill_page_desc(skb, i, page, off, copy);
 					if (TCP_PAGE(sk)) {
@@ -1074,7 +1073,7 @@ new_segment:
 			}
 
 			if (!copied)
-				TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
+				TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
 
 			tp->write_seq += copy;
 			TCP_SKB_CB(skb)->end_seq += copy;
@@ -1194,13 +1193,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 	struct tcp_sock *tp = tcp_sk(sk);
 	int time_to_ack = 0;
 
-#if TCP_DEBUG
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
 	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
 	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
 	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
-#endif
 
 	if (inet_csk_ack_scheduled(sk)) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2409,7 +2406,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		   unsigned int optlen)
 {
-	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (level != SOL_TCP)
 		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
@@ -2431,9 +2428,9 @@ EXPORT_SYMBOL(compat_tcp_setsockopt);
 #endif
 
 /* Return information about state of tcp endpoint in API format. */
-void tcp_get_info(struct sock *sk, struct tcp_info *info)
+void tcp_get_info(const struct sock *sk, struct tcp_info *info)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now = tcp_time_stamp;
 
@@ -2455,8 +2452,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
 	}
 
-	if (tp->ecn_flags&TCP_ECN_OK)
+	if (tp->ecn_flags & TCP_ECN_OK)
 		info->tcpi_options |= TCPI_OPT_ECN;
+	if (tp->ecn_flags & TCP_ECN_SEEN)
+		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
 
 	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
 	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
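TCPI_OPT_ECN reports that ECN was negotiated at connect time; the new TCPI_OPT_ECN_SEEN bit additionally tells user space that at least one received packet actually carried an ECT codepoint. A hedged user-space sketch that reads both bits from a connected TCP socket (the fallback defines mirror linux/tcp.h; TCPI_OPT_ECN_SEEN only exists in headers that carry this change):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/tcp.h>	/* struct tcp_info, TCP_INFO, SOL_TCP (glibc) */

#ifndef TCPI_OPT_ECN
#define TCPI_OPT_ECN		8	/* value from linux/tcp.h */
#endif
#ifndef TCPI_OPT_ECN_SEEN
#define TCPI_OPT_ECN_SEEN	16	/* value from linux/tcp.h, new in this change */
#endif

static void print_ecn_state(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, SOL_TCP, TCP_INFO, &info, &len) == 0) {
		if (info.tcpi_options & TCPI_OPT_ECN)
			printf("ECN negotiated\n");
		if (info.tcpi_options & TCPI_OPT_ECN_SEEN)
			printf("ECT seen on at least one received segment\n");
	}
}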
@@ -2857,26 +2856,25 @@ EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
 static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
+static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
 
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
 {
 	int cpu;
+
 	for_each_possible_cpu(cpu) {
-		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
-		if (p) {
-			if (p->md5_desc.tfm)
-				crypto_free_hash(p->md5_desc.tfm);
-			kfree(p);
-		}
+		struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
+
+		if (p->md5_desc.tfm)
+			crypto_free_hash(p->md5_desc.tfm);
 	}
 	free_percpu(pool);
 }
 
 void tcp_free_md5sig_pool(void)
 {
-	struct tcp_md5sig_pool * __percpu *pool = NULL;
+	struct tcp_md5sig_pool __percpu *pool = NULL;
 
 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	if (--tcp_md5sig_users == 0) {
@@ -2889,30 +2887,24 @@ void tcp_free_md5sig_pool(void)
 }
 EXPORT_SYMBOL(tcp_free_md5sig_pool);
 
-static struct tcp_md5sig_pool * __percpu *
+static struct tcp_md5sig_pool __percpu *
 __tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	int cpu;
-	struct tcp_md5sig_pool * __percpu *pool;
+	struct tcp_md5sig_pool __percpu *pool;
 
-	pool = alloc_percpu(struct tcp_md5sig_pool *);
+	pool = alloc_percpu(struct tcp_md5sig_pool);
 	if (!pool)
 		return NULL;
 
 	for_each_possible_cpu(cpu) {
-		struct tcp_md5sig_pool *p;
 		struct crypto_hash *hash;
 
-		p = kzalloc(sizeof(*p), sk->sk_allocation);
-		if (!p)
-			goto out_free;
-		*per_cpu_ptr(pool, cpu) = p;
-
 		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
 		if (!hash || IS_ERR(hash))
 			goto out_free;
 
-		p->md5_desc.tfm = hash;
+		per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
 	}
 	return pool;
 out_free:
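The __percpu rework in the two hunks above replaces a per-cpu array of separately kmalloc'd pool pointers with per-cpu storage of the pool structs themselves: a single alloc_percpu() places one struct on every possible CPU, eliminating the per-CPU kzalloc()/kfree() pair, the NULL checks, and one pointer dereference on every lookup (which is why tcp_get_md5sig_pool() below can return this_cpu_ptr(p) instead of *this_cpu_ptr(p)). A toy kernel-side sketch of the pattern, using a hypothetical struct pool:

#include <linux/percpu.h>
#include <linux/errno.h>

struct pool {
	struct crypto_hash *tfm;
};

static struct pool __percpu *pool;

static int pool_init(void)
{
	int cpu;

	pool = alloc_percpu(struct pool);	/* one zeroed instance per possible CPU */
	if (!pool)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(pool, cpu)->tfm = NULL;	/* redundant (alloc_percpu zeroes),
							 * shown for the access pattern */
	return 0;
}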
@@ -2920,9 +2912,9 @@ out_free:
 	return NULL;
 }
 
-struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
+struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
-	struct tcp_md5sig_pool * __percpu *pool;
+	struct tcp_md5sig_pool __percpu *pool;
 	int alloc = 0;
 
 retry:
@@ -2941,7 +2933,7 @@ retry:
 
 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
-		struct tcp_md5sig_pool * __percpu *p;
+		struct tcp_md5sig_pool __percpu *p;
 
 		p = __tcp_alloc_md5sig_pool(sk);
 		spin_lock_bh(&tcp_md5sig_pool_lock);
@@ -2974,7 +2966,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
  */
 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
-	struct tcp_md5sig_pool * __percpu *p;
+	struct tcp_md5sig_pool __percpu *p;
 
 	local_bh_disable();
 
@@ -2985,7 +2977,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 	spin_unlock(&tcp_md5sig_pool_lock);
 
 	if (p)
-		return *this_cpu_ptr(p);
+		return this_cpu_ptr(p);
 
 	local_bh_enable();
 	return NULL;
@@ -3000,23 +2992,25 @@ void tcp_put_md5sig_pool(void)
 EXPORT_SYMBOL(tcp_put_md5sig_pool);
 
 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
-			struct tcphdr *th)
+			const struct tcphdr *th)
 {
 	struct scatterlist sg;
+	struct tcphdr hdr;
 	int err;
 
-	__sum16 old_checksum = th->check;
-	th->check = 0;
+	/* We are not allowed to change tcphdr, make a local copy */
+	memcpy(&hdr, th, sizeof(hdr));
+	hdr.check = 0;
+
 	/* options aren't included in the hash */
-	sg_init_one(&sg, th, sizeof(struct tcphdr));
-	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
-	th->check = old_checksum;
+	sg_init_one(&sg, &hdr, sizeof(hdr));
+	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
 	return err;
 }
 EXPORT_SYMBOL(tcp_md5_hash_header);
 
 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
-			  struct sk_buff *skb, unsigned header_len)
+			  const struct sk_buff *skb, unsigned int header_len)
 {
 	struct scatterlist sg;
 	const struct tcphdr *tp = tcp_hdr(skb);
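With th now const-qualified, the old sequence of zeroing th->check in place, hashing, and restoring the saved checksum is no longer possible, and it was never safe if the header could be observed concurrently; the function instead hashes a stack copy with the checksum cleared (the checksum is excluded from the MD5 digest per RFC 2385). A condensed sketch of the resulting pattern, with a hypothetical helper name, using the same old-style crypto_hash API:

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/tcp.h>

static int md5_hash_header_copy(struct hash_desc *desc, const struct tcphdr *th)
{
	struct scatterlist sg;
	struct tcphdr hdr;

	memcpy(&hdr, th, sizeof(hdr));	/* never write through the const pointer */
	hdr.check = 0;			/* checksum is excluded from the hash */
	sg_init_one(&sg, &hdr, sizeof(hdr));
	return crypto_hash_update(desc, &sg, sizeof(hdr));
}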
@@ -3035,8 +3029,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 
 	for (i = 0; i < shi->nr_frags; ++i) {
 		const struct skb_frag_struct *f = &shi->frags[i];
-		sg_set_page(&sg, f->page, f->size, f->page_offset);
-		if (crypto_hash_update(desc, &sg, f->size))
+		struct page *page = skb_frag_page(f);
+		sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+		if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
 			return 1;
 	}
 
@@ -3048,7 +3043,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 }
 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
 
-int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
 {
 	struct scatterlist sg;
 