about summary refs log tree commit diff stats
path: root/net/ipv4/tcp.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c  56
1 file changed, 26 insertions(+), 30 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46febcacb72..4c0da24fb64 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -524,7 +524,7 @@ EXPORT_SYMBOL(tcp_ioctl);
524 524
525static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 525static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
526{ 526{
527 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH; 527 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
528 tp->pushed_seq = tp->write_seq; 528 tp->pushed_seq = tp->write_seq;
529} 529}
530 530
@@ -540,7 +540,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
540 540
541 skb->csum = 0; 541 skb->csum = 0;
542 tcb->seq = tcb->end_seq = tp->write_seq; 542 tcb->seq = tcb->end_seq = tp->write_seq;
543 tcb->flags = TCPHDR_ACK; 543 tcb->tcp_flags = TCPHDR_ACK;
544 tcb->sacked = 0; 544 tcb->sacked = 0;
545 skb_header_release(skb); 545 skb_header_release(skb);
546 tcp_add_write_queue_tail(sk, skb); 546 tcp_add_write_queue_tail(sk, skb);
@@ -830,7 +830,7 @@ new_segment:
830 skb_shinfo(skb)->gso_segs = 0; 830 skb_shinfo(skb)->gso_segs = 0;
831 831
832 if (!copied) 832 if (!copied)
833 TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH; 833 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
834 834
835 copied += copy; 835 copied += copy;
836 poffset += copy; 836 poffset += copy;
@@ -1074,7 +1074,7 @@ new_segment:
1074 } 1074 }
1075 1075
1076 if (!copied) 1076 if (!copied)
1077 TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH; 1077 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1078 1078
1079 tp->write_seq += copy; 1079 tp->write_seq += copy;
1080 TCP_SKB_CB(skb)->end_seq += copy; 1080 TCP_SKB_CB(skb)->end_seq += copy;
@@ -2455,8 +2455,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2455 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 2455 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2456 } 2456 }
2457 2457
2458 if (tp->ecn_flags&TCP_ECN_OK) 2458 if (tp->ecn_flags & TCP_ECN_OK)
2459 info->tcpi_options |= TCPI_OPT_ECN; 2459 info->tcpi_options |= TCPI_OPT_ECN;
2460 if (tp->ecn_flags & TCP_ECN_SEEN)
2461 info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2460 2462
2461 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 2463 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2462 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 2464 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
@@ -2857,26 +2859,25 @@ EXPORT_SYMBOL(tcp_gro_complete);
2857 2859
2858#ifdef CONFIG_TCP_MD5SIG 2860#ifdef CONFIG_TCP_MD5SIG
2859static unsigned long tcp_md5sig_users; 2861static unsigned long tcp_md5sig_users;
2860static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool; 2862static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
2861static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 2863static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2862 2864
2863static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool) 2865static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
2864{ 2866{
2865 int cpu; 2867 int cpu;
2868
2866 for_each_possible_cpu(cpu) { 2869 for_each_possible_cpu(cpu) {
2867 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu); 2870 struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
2868 if (p) { 2871
2869 if (p->md5_desc.tfm) 2872 if (p->md5_desc.tfm)
2870 crypto_free_hash(p->md5_desc.tfm); 2873 crypto_free_hash(p->md5_desc.tfm);
2871 kfree(p);
2872 }
2873 } 2874 }
2874 free_percpu(pool); 2875 free_percpu(pool);
2875} 2876}
2876 2877
2877void tcp_free_md5sig_pool(void) 2878void tcp_free_md5sig_pool(void)
2878{ 2879{
2879 struct tcp_md5sig_pool * __percpu *pool = NULL; 2880 struct tcp_md5sig_pool __percpu *pool = NULL;
2880 2881
2881 spin_lock_bh(&tcp_md5sig_pool_lock); 2882 spin_lock_bh(&tcp_md5sig_pool_lock);
2882 if (--tcp_md5sig_users == 0) { 2883 if (--tcp_md5sig_users == 0) {
@@ -2889,30 +2890,24 @@ void tcp_free_md5sig_pool(void)
2889} 2890}
2890EXPORT_SYMBOL(tcp_free_md5sig_pool); 2891EXPORT_SYMBOL(tcp_free_md5sig_pool);
2891 2892
2892static struct tcp_md5sig_pool * __percpu * 2893static struct tcp_md5sig_pool __percpu *
2893__tcp_alloc_md5sig_pool(struct sock *sk) 2894__tcp_alloc_md5sig_pool(struct sock *sk)
2894{ 2895{
2895 int cpu; 2896 int cpu;
2896 struct tcp_md5sig_pool * __percpu *pool; 2897 struct tcp_md5sig_pool __percpu *pool;
2897 2898
2898 pool = alloc_percpu(struct tcp_md5sig_pool *); 2899 pool = alloc_percpu(struct tcp_md5sig_pool);
2899 if (!pool) 2900 if (!pool)
2900 return NULL; 2901 return NULL;
2901 2902
2902 for_each_possible_cpu(cpu) { 2903 for_each_possible_cpu(cpu) {
2903 struct tcp_md5sig_pool *p;
2904 struct crypto_hash *hash; 2904 struct crypto_hash *hash;
2905 2905
2906 p = kzalloc(sizeof(*p), sk->sk_allocation);
2907 if (!p)
2908 goto out_free;
2909 *per_cpu_ptr(pool, cpu) = p;
2910
2911 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 2906 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2912 if (!hash || IS_ERR(hash)) 2907 if (!hash || IS_ERR(hash))
2913 goto out_free; 2908 goto out_free;
2914 2909
2915 p->md5_desc.tfm = hash; 2910 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
2916 } 2911 }
2917 return pool; 2912 return pool;
2918out_free: 2913out_free:
@@ -2920,9 +2915,9 @@ out_free:
2920 return NULL; 2915 return NULL;
2921} 2916}
2922 2917
2923struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk) 2918struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
2924{ 2919{
2925 struct tcp_md5sig_pool * __percpu *pool; 2920 struct tcp_md5sig_pool __percpu *pool;
2926 int alloc = 0; 2921 int alloc = 0;
2927 2922
2928retry: 2923retry:
@@ -2941,7 +2936,7 @@ retry:
2941 2936
2942 if (alloc) { 2937 if (alloc) {
2943 /* we cannot hold spinlock here because this may sleep. */ 2938 /* we cannot hold spinlock here because this may sleep. */
2944 struct tcp_md5sig_pool * __percpu *p; 2939 struct tcp_md5sig_pool __percpu *p;
2945 2940
2946 p = __tcp_alloc_md5sig_pool(sk); 2941 p = __tcp_alloc_md5sig_pool(sk);
2947 spin_lock_bh(&tcp_md5sig_pool_lock); 2942 spin_lock_bh(&tcp_md5sig_pool_lock);
@@ -2974,7 +2969,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2974 */ 2969 */
2975struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 2970struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2976{ 2971{
2977 struct tcp_md5sig_pool * __percpu *p; 2972 struct tcp_md5sig_pool __percpu *p;
2978 2973
2979 local_bh_disable(); 2974 local_bh_disable();
2980 2975
@@ -2985,7 +2980,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2985 spin_unlock(&tcp_md5sig_pool_lock); 2980 spin_unlock(&tcp_md5sig_pool_lock);
2986 2981
2987 if (p) 2982 if (p)
2988 return *this_cpu_ptr(p); 2983 return this_cpu_ptr(p);
2989 2984
2990 local_bh_enable(); 2985 local_bh_enable();
2991 return NULL; 2986 return NULL;
@@ -3035,7 +3030,8 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3035 3030
3036 for (i = 0; i < shi->nr_frags; ++i) { 3031 for (i = 0; i < shi->nr_frags; ++i) {
3037 const struct skb_frag_struct *f = &shi->frags[i]; 3032 const struct skb_frag_struct *f = &shi->frags[i];
3038 sg_set_page(&sg, f->page, f->size, f->page_offset); 3033 struct page *page = skb_frag_page(f);
3034 sg_set_page(&sg, page, f->size, f->page_offset);
3039 if (crypto_hash_update(desc, &sg, f->size)) 3035 if (crypto_hash_update(desc, &sg, f->size))
3040 return 1; 3036 return 1;
3041 } 3037 }