path: root/net/ipv4/tcp_ipv4.c
author	Yuchung Cheng <ycheng@google.com>	2014-05-11 23:22:11 -0400
committer	David S. Miller <davem@davemloft.net>	2014-05-13 17:53:02 -0400
commit	843f4a55e336e6d0c7bb92e7f9621535bc8d5fcd (patch)
tree	17010fcb1b56174476b471758c3ca4f825ccbe7f /net/ipv4/tcp_ipv4.c
parent	89278c9dc922272df921042aafa18311f3398c6c (diff)
tcp: use tcp_v4_send_synack on first SYN-ACK
To avoid large code duplication in IPv6, we first need to simplify the complicated SYN-ACK sending code in tcp_v4_conn_request(). To use tcp_v4(6)_send_synack() to send all SYN-ACKs, we need to initialize the mini socket's receive window before trying to create the child socket and/or building the SYN-ACK packet. So we move that initialization from tcp_make_synack() into tcp_v4_conn_request(), as a new function tcp_openreq_init_rwin(). After this refactoring the SYN-ACK sending code is simpler, which makes it easier to implement Fast Open for IPv6.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Daniel Lee <longinus00@gmail.com>
Signed-off-by: Jerry Chu <hkchu@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
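For context, the new helper itself is not part of this file's diff (it lands in the shared TCP code), so the following is only a rough sketch of what the receive-window initialization is expected to look like, reconstructed from the window-setup logic that tcp_make_synack() ran before this patch. The helpers used here (tcp_select_initial_window(), dst_metric_advmss(), tcp_full_space()) exist in kernels of this era, but the exact body and field usage below are assumptions, not the committed implementation:

/* Sketch only: the real tcp_openreq_init_rwin() is added outside this file.
 * The point is that req->rcv_wnd, req->window_clamp and ireq->rcv_wscale are
 * now filled in before the child socket is created or the SYN-ACK is built.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   struct sock *sk, struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_sock *tp = tcp_sk(sk);
	__u8 rcv_wscale;
	int mss = dst_metric_advmss(dst);

	/* Honour a user-configured MSS cap, as the old SYN-ACK path did */
	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
		mss = tp->rx_opt.user_mss;

	/* Window clamp from the route, bounded by a locked receive buffer */
	req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
		req->window_clamp = tcp_full_space(sk);

	/* Pick the initial window now, so the child socket and the SYN-ACK
	 * both see the same values.
	 */
	tcp_select_initial_window(tcp_full_space(sk),
				  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
				  &req->rcv_wnd, &req->window_clamp,
				  ireq->wscale_ok, &rcv_wscale,
				  dst_metric(dst, RTAX_INITRWND));
	ireq->rcv_wscale = rcv_wscale;
}

With the window fields already set on the request sock, tcp_v4_conn_request() can call tcp_v4_send_synack() for both the regular and the Fast Open paths, as the last hunk below shows.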
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	57
1 file changed, 16 insertions(+), 41 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5ea0949dadfd..1665f0f84233 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -822,7 +822,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
-			      u16 queue_mapping)
+			      u16 queue_mapping,
+			      struct tcp_fastopen_cookie *foc)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -833,7 +834,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 		return -1;
 
-	skb = tcp_make_synack(sk, dst, req, NULL);
+	skb = tcp_make_synack(sk, dst, req, foc);
 
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -852,7 +853,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 
 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
 {
-	int res = tcp_v4_send_synack(sk, NULL, req, 0);
+	int res = tcp_v4_send_synack(sk, NULL, req, 0, NULL);
 
 	if (!res) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -1270,11 +1271,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-	bool want_cookie = false;
+	bool want_cookie = false, fastopen;
 	struct flowi4 fl4;
 	struct tcp_fastopen_cookie foc = { .len = -1 };
-	struct sk_buff *skb_synack;
-	int do_fastopen;
+	int err;
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1373,49 +1373,24 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 
 		isn = tcp_v4_init_sequence(skb);
 	}
-	tcp_rsk(req)->snt_isn = isn;
-
-	if (dst == NULL) {
-		dst = inet_csk_route_req(sk, &fl4, req);
-		if (dst == NULL)
-			goto drop_and_free;
-	}
-	do_fastopen = !want_cookie &&
-		      tcp_fastopen_check(sk, skb, req, &foc);
-
-	/* We don't call tcp_v4_send_synack() directly because we need
-	 * to make sure a child socket can be created successfully before
-	 * sending back synack!
-	 *
-	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
-	 * (or better yet, call tcp_send_synack() in the child context
-	 * directly, but will have to fix bunch of other code first)
-	 * after syn_recv_sock() except one will need to first fix the
-	 * latter to remove its dependency on the current implementation
-	 * of tcp_v4_send_synack()->tcp_select_initial_window().
-	 */
-	skb_synack = tcp_make_synack(sk, dst, req, &foc);
-
-	if (skb_synack) {
-		__tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
-		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
-	} else
+	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 		goto drop_and_free;
 
-	if (likely(!do_fastopen)) {
-		int err;
-		err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
-		     ireq->ir_rmt_addr, ireq->opt);
-		err = net_xmit_eval(err);
+	tcp_rsk(req)->snt_isn = isn;
+	tcp_rsk(req)->snt_synack = tcp_time_stamp;
+	tcp_openreq_init_rwin(req, sk, dst);
+	fastopen = !want_cookie &&
+		   tcp_try_fastopen(sk, skb, req, &foc, dst);
+	err = tcp_v4_send_synack(sk, dst, req,
				 skb_get_queue_mapping(skb), &foc);
+	if (!fastopen) {
 		if (err || want_cookie)
 			goto drop_and_free;
 
 		tcp_rsk(req)->snt_synack = tcp_time_stamp;
 		tcp_rsk(req)->listener = NULL;
-		/* Add the request_sock to the SYN table */
 		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-	} else if (tcp_fastopen_create_child(sk, skb, skb_synack, req))
-		goto drop_and_release;
+	}
 
 	return 0;
 
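For readability, here is the tail of tcp_v4_conn_request() as it reads after the patch, assembled from the '+' and context lines of the last hunk; no code beyond what the diff itself adds:

	/* Tail of the patched tcp_v4_conn_request(), assembled from the hunk above */
		isn = tcp_v4_init_sequence(skb);
	}
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		goto drop_and_free;

	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	tcp_openreq_init_rwin(req, sk, dst);
	fastopen = !want_cookie &&
		   tcp_try_fastopen(sk, skb, req, &foc, dst);
	err = tcp_v4_send_synack(sk, dst, req,
				 skb_get_queue_mapping(skb), &foc);
	if (!fastopen) {
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}

	return 0;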