author      Yuchung Cheng <ycheng@google.com>       2014-05-11 23:22:11 -0400
committer   David S. Miller <davem@davemloft.net>   2014-05-13 17:53:02 -0400
commit      843f4a55e336e6d0c7bb92e7f9621535bc8d5fcd (patch)
tree        17010fcb1b56174476b471758c3ca4f825ccbe7f /net/ipv4/tcp_fastopen.c
parent      89278c9dc922272df921042aafa18311f3398c6c (diff)
tcp: use tcp_v4_send_synack on first SYN-ACK
To avoid large code duplication in IPv6, we need to first simplify
the complicated SYN-ACK sending code in tcp_v4_conn_request().
To use tcp_v4(6)_send_synack() to send all SYN-ACKs, we need to
initialize the mini socket's receive window before trying to
create the child socket and/or building the SYN-ACK packet. So we move
that initialization from tcp_make_synack() to tcp_v4_conn_request()
as a new function tcp_openreq_init_req_rwin().
After this refactoring, the SYN-ACK sending code is simpler, which makes
it easier to implement Fast Open for IPv6.
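
Roughly, the reordering described above corresponds to the caller-side outline
below. This is an illustrative sketch of what tcp_v4_conn_request() ends up
doing, not code taken from this patch; the exact argument lists of the helpers
and the local variable names (foc, dst, fastopen, err) are assumptions.

    /* Sketch only: helper argument lists are approximations, not the
     * exact signatures introduced by this series.
     */

    /* 1. Set the request sock's receive window before either the child
     *    socket or the SYN-ACK is built.
     */
    tcp_openreq_init_req_rwin(req, sk, dst);

    /* 2. Fast Open: with a valid cookie, create the child socket that will
     *    own the data carried in the SYN (see tcp_fastopen_create_child()
     *    in the diff below).
     */
    fastopen = tcp_try_fastopen(sk, skb, req, &foc, dst);

    /* 3. Every first SYN-ACK, Fast Open or not, now goes through the same
     *    helper instead of an open-coded ip_build_and_send_pkt().
     */
    err = tcp_v4_send_synack(sk, dst, req, skb_get_queue_mapping(skb), &foc);
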
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Daniel Lee <longinus00@gmail.com>
Signed-off-by: Jerry Chu <hkchu@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_fastopen.c')
-rw-r--r--    net/ipv4/tcp_fastopen.c    67
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 5a98277b9a82..9b947a9aaf6e 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -95,34 +95,22 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
         rcu_read_unlock();
 }
 
-int tcp_fastopen_create_child(struct sock *sk,
-                              struct sk_buff *skb,
-                              struct sk_buff *skb_synack,
-                              struct request_sock *req)
+static bool tcp_fastopen_create_child(struct sock *sk,
+                                      struct sk_buff *skb,
+                                      struct dst_entry *dst,
+                                      struct request_sock *req)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
-        const struct inet_request_sock *ireq = inet_rsk(req);
         struct sock *child;
-        int err;
 
         req->num_retrans = 0;
         req->num_timeout = 0;
         req->sk = NULL;
 
         child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-        if (child == NULL) {
-                NET_INC_STATS_BH(sock_net(sk),
-                                 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
-                kfree_skb(skb_synack);
-                return -1;
-        }
-        err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
-                                    ireq->ir_rmt_addr, ireq->opt);
-        err = net_xmit_eval(err);
-        if (!err)
-                tcp_rsk(req)->snt_synack = tcp_time_stamp;
-        /* XXX (TFO) - is it ok to ignore error and continue? */
+        if (child == NULL)
+                return false;
 
         spin_lock(&queue->fastopenq->lock);
         queue->fastopenq->qlen++;
@@ -167,28 +155,24 @@ int tcp_fastopen_create_child(struct sock *sk,
         /* Queue the data carried in the SYN packet. We need to first
          * bump skb's refcnt because the caller will attempt to free it.
          *
-         * XXX (TFO) - we honor a zero-payload TFO request for now.
-         * (Any reason not to?)
+         * XXX (TFO) - we honor a zero-payload TFO request for now,
+         * (any reason not to?) but no need to queue the skb since
+         * there is no data. How about SYN+FIN?
          */
-        if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
-                /* Don't queue the skb if there is no payload in SYN.
-                 * XXX (TFO) - How about SYN+FIN?
-                 */
-                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-        } else {
+        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
                 skb = skb_get(skb);
                 skb_dst_drop(skb);
                 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
                 skb_set_owner_r(skb, child);
                 __skb_queue_tail(&child->sk_receive_queue, skb);
-                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                 tp->syn_data_acked = 1;
         }
+        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
         sk->sk_data_ready(sk);
         bh_unlock_sock(child);
         sock_put(child);
         WARN_ON(req->sk == NULL);
-        return 0;
+        return true;
 }
 EXPORT_SYMBOL(tcp_fastopen_create_child);
 
@@ -232,9 +216,10 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
  * may be updated and return the client in the SYN-ACK later. E.g., Fast Open
  * cookie request (foc->len == 0).
  */
-bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
-                        struct request_sock *req,
-                        struct tcp_fastopen_cookie *foc)
+bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+                      struct request_sock *req,
+                      struct tcp_fastopen_cookie *foc,
+                      struct dst_entry *dst)
 {
         struct tcp_fastopen_cookie valid_foc = { .len = -1 };
         bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
@@ -255,11 +240,21 @@ bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
         if (foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
             foc->len == valid_foc.len &&
             !memcmp(foc->val, valid_foc.val, foc->len)) {
+                /* Cookie is valid. Create a (full) child socket to accept
+                 * the data in SYN before returning a SYN-ACK to ack the
+                 * data. If we fail to create the socket, fall back and
+                 * ack the ISN only but includes the same cookie.
+                 *
+                 * Note: Data-less SYN with valid cookie is allowed to send
+                 * data in SYN_RECV state.
+                 */
 fastopen:
-                tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-                foc->len = -1;
-                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
-                return true;
+                if (tcp_fastopen_create_child(sk, skb, dst, req)) {
+                        foc->len = -1;
+                        NET_INC_STATS_BH(sock_net(sk),
+                                         LINUX_MIB_TCPFASTOPENPASSIVE);
+                        return true;
+                }
         }
 
         NET_INC_STATS_BH(sock_net(sk), foc->len ?
@@ -268,4 +263,4 @@ fastopen:
         *foc = valid_foc;
         return false;
 }
-EXPORT_SYMBOL(tcp_fastopen_check);
+EXPORT_SYMBOL(tcp_try_fastopen);