author     Eric Dumazet <edumazet@google.com>      2015-09-25 10:39:19 -0400
committer  David S. Miller <davem@davemloft.net>   2015-09-25 16:00:38 -0400
commit     5d062de7f8ea1ca7c635957ff1144fba815ba34c
tree       913926fe8c4d4d3594a4a8558aa3ac6feb80ded6
parent     6ac705b1805863b1899e85f641bb265f9e6e9d99
tcp: constify tcp_make_synack() socket argument
The listener socket is not locked when tcp_make_synack() is called,
so we had better make sure no field of it is written.

There is one exception: since SYNACK packets are attached to the listener
at this point (or to the SYN_RECV child in the case of Fast Open),
sock_wmalloc() needs to update sk->sk_wmem_alloc, but it does so with
atomic operations, so this is safe.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
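The discipline the changelog describes — take the unlocked, shared socket as const so the compiler rejects stray writes, and cast the const away only for the one field that is updated atomically — can be shown in a small user-space sketch. Everything below is a hypothetical illustration with C11 atomics standing in for the kernel's atomic_t; it is not kernel code:

/* cc -std=c11 sketch.c */
#include <stdatomic.h>
#include <stdio.h>

struct listener {
	int advertised_mss;	/* plain field: read-only while unlocked */
	atomic_int wmem_alloc;	/* the one field writers may touch */
};

/* const parameter: any plain store through 'lsn' is now a compile error. */
static void build_synack(const struct listener *lsn, int truesize)
{
	/* The single allowed exception, mirroring the sock_wmalloc() call:
	 * cast the const away and update the counter atomically.
	 */
	atomic_fetch_add(&((struct listener *)lsn)->wmem_alloc, truesize);
}

int main(void)
{
	struct listener lsn = { .advertised_mss = 1460 };

	build_synack(&lsn, 128);	/* safe from any number of cpus */
	printf("wmem_alloc = %d\n", atomic_load(&lsn.wmem_alloc));
	return 0;
}

The cast is legal here because the underlying object is not itself const; the qualifier exists only on the pointer, to stop accidental plain stores.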
-rw-r--r--  include/net/tcp.h      |  2 +-
-rw-r--r--  net/ipv4/tcp_output.c  | 24 +++++++++++++++---------
2 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 45bc3c63c3fd..19f23590baa0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -461,7 +461,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int tcp_connect(struct sock *sk);
-struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
 				struct tcp_fastopen_cookie *foc);
 int tcp_disconnect(struct sock *sk, int flags);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ba6194152d39..9eb67a8933f1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2944,20 +2944,25 @@ int tcp_send_synack(struct sock *sk)
  * Allocate one skb and build a SYNACK packet.
  * @dst is consumed : Caller should not use it again.
  */
-struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
 				struct tcp_fastopen_cookie *foc)
 {
-	struct tcp_out_options opts;
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct tcphdr *th;
-	struct sk_buff *skb;
+	const struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_key *md5 = NULL;
+	struct tcp_out_options opts;
+	struct sk_buff *skb;
 	int tcp_header_size;
+	struct tcphdr *th;
+	u16 user_mss;
 	int mss;
 
-	skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
+	/* sk is a const pointer, because we want to express multiple cpus
+	 * might call us concurrently.
+	 * sock_wmalloc() will change sk->sk_wmem_alloc in an atomic way.
+	 */
+	skb = sock_wmalloc((struct sock *)sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
 	if (unlikely(!skb)) {
 		dst_release(dst);
 		return NULL;
@@ -2968,8 +2973,9 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	skb_dst_set(skb, dst);
 
 	mss = dst_metric_advmss(dst);
-	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
-		mss = tp->rx_opt.user_mss;
+	user_mss = READ_ONCE(tp->rx_opt.user_mss);
+	if (user_mss && user_mss < mss)
+		mss = user_mss;
 
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
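The user_mss hunk is the companion fix: with the listener unlocked, tp->rx_opt.user_mss can change under us (for instance via a concurrent setsockopt(TCP_MAXSEG)), and the old code read it twice — once in the test, once in the assignment — so the two reads could observe different values. Copying it once into a local through READ_ONCE() closes that window. A user-space analogue follows (hypothetical names; a C11 relaxed atomic load stands in for READ_ONCE()):

#include <stdatomic.h>

/* Racy version: up to three separate loads of a value another thread
 * may be changing; 'mss' can end up set to a value that never passed
 * the test.
 */
int clamp_mss_racy(const _Atomic unsigned short *user_mss, int mss)
{
	if (*user_mss && *user_mss < mss)
		mss = *user_mss;
	return mss;
}

/* Fixed version, shaped like the patch: load once into a local, then
 * test and use that single snapshot.
 */
int clamp_mss(const _Atomic unsigned short *user_mss, int mss)
{
	unsigned short snap = atomic_load_explicit(user_mss,
						   memory_order_relaxed);

	if (snap && snap < mss)
		mss = snap;
	return mss;
}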
@@ -3009,7 +3015,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 
 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
 	th->window = htons(min(req->rcv_wnd, 65535U));
-	tcp_options_write((__be32 *)(th + 1), tp, &opts);
+	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
 	th->doff = (tcp_header_size >> 2);
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
 
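The final hunk passes NULL where the listener's tcp_sock used to go: at this point in the tree, tcp_options_write() only dereferences its tcp_sock argument when emitting SACK blocks, which a SYNACK never carries, so the NULL both documents and enforces that this path cannot write to the unlocked listener. The contract, sketched with hypothetical names rather than the real kernel function:

/* Hypothetical sketch of the calling contract, not the kernel function. */
struct conn_state {
	int dsack_pending;	/* only meaningful on established sockets */
};

void write_options(unsigned char *out, struct conn_state *cs,
		   int num_sack_blocks)
{
	if (num_sack_blocks) {
		/* Established-socket path: caller holds the socket lock
		 * and therefore may pass a writable 'cs'.
		 */
		cs->dsack_pending = 0;
	}

	/* ... emit the option bytes into 'out' ... */
	(void)out;
}

/* SYNACK path: no SACK blocks are possible, so passing NULL proves at
 * the call site that nothing on the listener can be written:
 *
 *	write_options(buf, NULL, 0);
 */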