author		David S. Miller <davem@davemloft.net>	2015-09-25 16:00:40 -0400
committer	David S. Miller <davem@davemloft.net>	2015-09-25 16:00:40 -0400
commit		4d54d86546f62c7c4a0fe3b36a64c5e3b98ce1a9 (patch)
tree		bd48e072bacebb47bc6cd4ebb3483e9bd9da21b0 /net/ipv4/tcp_output.c
parent		6ea29da1d04f56e167ec8cc5ed15e927997d9d67 (diff)
parent		1b70e977cef6ce7e7411c9bbec21f9adc8e29097 (diff)
Merge branch 'listener-sock-const'
Eric Dumazet says:

====================
dccp/tcp: constify listener sock

Another patch bomb to prepare lockless TCP/DCCP LISTEN handling.

SYNACK retransmits are built and sent without the listener socket being locked. Soon, initial SYNACK packets will have the same property.

This series makes sure we did not get anything wrong with this model, by adding a const qualifier to all the paths taken from SYNACK building and transmit, for IPv4/IPv6 and TCP/DCCP.

The only potential problem was the rewrite of ECN bits for connections using DCTCP as the congestion module, but this was a very minor one.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
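For readers less familiar with the model the series describes, here is a minimal userspace sketch of the pattern, not kernel code; the names (struct listener, build_synack, wmem_charge) and the simplified READ_ONCE macro are invented for illustration. The builder takes a const pointer because several CPUs may run it concurrently, snapshots racy fields with a single load, and casts const away only for a helper whose sole write is an atomic counter update, mirroring sock_wmalloc().

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): force a single load. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

struct listener {
        uint16_t user_mss;      /* may be rewritten by another CPU (setsockopt) */
        atomic_int wmem_alloc;  /* only ever updated atomically */
};

/* Plays the role of sock_wmalloc(): its only write is an atomic counter. */
static void wmem_charge(struct listener *lst, int bytes)
{
        atomic_fetch_add(&lst->wmem_alloc, bytes);
}

/* Plays the role of tcp_make_synack(): the listener is const, racy fields
 * are snapshotted once, and const is cast away only for the atomic-only
 * helper.
 */
static int build_synack(const struct listener *lst, int path_mss)
{
        uint16_t user_mss = READ_ONCE(lst->user_mss);
        int mss = path_mss;

        if (user_mss && user_mss < mss)
                mss = user_mss;

        wmem_charge((struct listener *)lst, 128);
        return mss;
}

int main(void)
{
        struct listener lst = { .user_mss = 1200, .wmem_alloc = 0 };

        printf("advertised mss = %d\n", build_synack(&lst, 1460));
        return 0;
}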
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c | 51
1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4cd0b50d4e46..53ce6cf55598 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -357,14 +357,10 @@ static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
 }
 
 static void
-tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
-                    struct sock *sk)
+tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
 {
-        if (inet_rsk(req)->ecn_ok) {
+        if (inet_rsk(req)->ecn_ok)
                 th->ece = 1;
-                if (tcp_ca_needs_ecn(sk))
-                        INET_ECN_xmit(sk);
-        }
 }
 
 /* Set up ECN state for a packet on a ESTABLISHED socket that is about to
@@ -612,12 +608,11 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 }
 
 /* Set up TCP options for SYN-ACKs. */
-static unsigned int tcp_synack_options(struct sock *sk,
-                                       struct request_sock *req,
-                                       unsigned int mss, struct sk_buff *skb,
-                                       struct tcp_out_options *opts,
-                                       const struct tcp_md5sig_key *md5,
-                                       struct tcp_fastopen_cookie *foc)
+static unsigned int tcp_synack_options(struct request_sock *req,
+                                       unsigned int mss, struct sk_buff *skb,
+                                       struct tcp_out_options *opts,
+                                       const struct tcp_md5sig_key *md5,
+                                       struct tcp_fastopen_cookie *foc)
 {
         struct inet_request_sock *ireq = inet_rsk(req);
         unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -2949,20 +2944,25 @@ int tcp_send_synack(struct sock *sk)
  * Allocate one skb and build a SYNACK packet.
  * @dst is consumed : Caller should not use it again.
  */
-struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                 struct request_sock *req,
                                 struct tcp_fastopen_cookie *foc)
 {
-        struct tcp_out_options opts;
         struct inet_request_sock *ireq = inet_rsk(req);
-        struct tcp_sock *tp = tcp_sk(sk);
-        struct tcphdr *th;
-        struct sk_buff *skb;
+        const struct tcp_sock *tp = tcp_sk(sk);
         struct tcp_md5sig_key *md5 = NULL;
+        struct tcp_out_options opts;
+        struct sk_buff *skb;
         int tcp_header_size;
+        struct tcphdr *th;
+        u16 user_mss;
         int mss;
 
-        skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
+        /* sk is a const pointer, because we want to express multiple cpus
+         * might call us concurrently.
+         * sock_wmalloc() will change sk->sk_wmem_alloc in an atomic way.
+         */
+        skb = sock_wmalloc((struct sock *)sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
         if (unlikely(!skb)) {
                 dst_release(dst);
                 return NULL;
@@ -2973,8 +2973,9 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         skb_dst_set(skb, dst);
 
         mss = dst_metric_advmss(dst);
-        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
-                mss = tp->rx_opt.user_mss;
+        user_mss = READ_ONCE(tp->rx_opt.user_mss);
+        if (user_mss && user_mss < mss)
+                mss = user_mss;
 
         memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
@@ -2989,8 +2990,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
 #endif
         skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
-        tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
-                                             foc) + sizeof(*th);
+        tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
+                          sizeof(*th);
 
         skb_push(skb, tcp_header_size);
         skb_reset_transport_header(skb);
@@ -2999,7 +3000,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         memset(th, 0, sizeof(struct tcphdr));
         th->syn = 1;
         th->ack = 1;
-        tcp_ecn_make_synack(req, th, sk);
+        tcp_ecn_make_synack(req, th);
         th->source = htons(ireq->ir_num);
         th->dest = ireq->ir_rmt_port;
         /* Setting of flags are superfluous here for callers (and ECE is
@@ -3014,7 +3015,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 
         /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
         th->window = htons(min(req->rcv_wnd, 65535U));
-        tcp_options_write((__be32 *)(th + 1), tp, &opts);
+        tcp_options_write((__be32 *)(th + 1), NULL, &opts);
         th->doff = (tcp_header_size >> 2);
         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
 
@@ -3501,7 +3502,7 @@ void tcp_send_probe0(struct sock *sk)
                              TCP_RTO_MAX);
 }
 
-int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
+int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 {
         const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
         struct flowi fl;
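One detail of the diff worth spelling out is the user_mss hunk: once the listener is no longer locked, a setsockopt() on another CPU can rewrite tp->rx_opt.user_mss while a SYNACK is being built, and a compiler is free to reload a plain field between the test and the use. The sketch below shows the shape of the problem and of the fix; it is a userspace illustration with invented names (struct cfg, clamp_mss) and a simplified READ_ONCE, not kernel code.

#include <stdint.h>

/* Simplified stand-in for the kernel's READ_ONCE(): force a single load. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

struct cfg {
        uint16_t user_mss;      /* another thread may change this at any time */
};

/* Old shape: the compiler may load c->user_mss up to three times, so the
 * value copied into mss can differ from the value that passed the test.
 */
static int clamp_mss_racy(const struct cfg *c, int mss)
{
        if (c->user_mss && c->user_mss < mss)
                mss = c->user_mss;
        return mss;
}

/* New shape: a single snapshot, then every decision uses that local copy. */
static int clamp_mss(const struct cfg *c, int mss)
{
        uint16_t user_mss = READ_ONCE(c->user_mss);

        if (user_mss && user_mss < mss)
                mss = user_mss;
        return mss;
}

int main(void)
{
        struct cfg c = { .user_mss = 536 };

        /* Single-threaded here, so both return 536; the difference only
         * matters when another thread writes c.user_mss concurrently.
         */
        return !(clamp_mss_racy(&c, 1460) == 536 && clamp_mss(&c, 1460) == 536);
}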