author		David S. Miller <davem@davemloft.net>	2014-05-13 17:53:46 -0400
committer	David S. Miller <davem@davemloft.net>	2014-05-13 17:53:46 -0400
commit		ae8b42c6fc37ca1b7eb30898f5a65196bbb47291 (patch)
tree		a5e44c9427f0c6a5cb0e6252e3ccbd5ef4f7a50e	/net/ipv6/tcp_ipv6.c
parent		4b9734e547aaa947e56480ecf6d509cf9cc307cc (diff)
parent		3a19ce0eec32667b835d8dc887002019fc6b3a02 (diff)
Merge branch 'tcp-fastopen-ipv6'
Yuchung Cheng says:

====================
tcp: IPv6 support for fastopen server

This patch series adds IPv6 support for the fastopen server. To
minimize code duplication in IPv4 and IPv6, the current v4-only code is
refactored and the common code is moved into net/ipv4/tcp_fastopen.c.

The current code also uses a function different from
tcp_v4_send_synack() to send the first SYN-ACK in fastopen. The new
code eliminates this separate function by refactoring the child-socket
and SYN-ACK creation code. After this refactoring in the first four
patches, we can add the fastopen code to IPv6 simply by changing the
corresponding IPv6 functions.

Note that the Fast Open client already supports IPv6. This series is
for the server-side (passive open) IPv6 support only.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
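For orientation before the diff: server-side Fast Open is enabled from
userspace with the TCP_FASTOPEN socket option (introduced for IPv4
servers in Linux 3.7), and this merge is what makes the same option
effective on AF_INET6 listeners. Below is a minimal sketch of such a
server; the port and queue length are arbitrary and error handling is
omitted. A client, already IPv6-capable as noted above, would use
sendto() with MSG_FASTOPEN in place of connect().

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int qlen = 16;	/* cap on pending TFO requests (fastopenq->max_qlen) */
		struct sockaddr_in6 addr;
		int fd = socket(AF_INET6, SOCK_STREAM, 0);

		memset(&addr, 0, sizeof(addr));
		addr.sin6_family = AF_INET6;
		addr.sin6_addr = in6addr_any;
		addr.sin6_port = htons(8080);
		bind(fd, (struct sockaddr *)&addr, sizeof(addr));

		/* Arm Fast Open before listen(); with this merge the kernel
		 * can create the child socket directly from a cookie-bearing
		 * SYN on an IPv6 listener. */
		setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
		listen(fd, 128);

		for (;;) {
			int c = accept(fd, NULL, NULL);	/* SYN data may already be readable */
			if (c >= 0)
				close(c);
		}
	}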
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--	net/ipv6/tcp_ipv6.c	62
1 file changed, 45 insertions(+), 17 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7fa67439f4d6..3a267bf14f2f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -340,7 +340,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	struct sock *sk;
 	int err;
 	struct tcp_sock *tp;
-	__u32 seq;
+	struct request_sock *fastopen;
+	__u32 seq, snd_una;
 	struct net *net = dev_net(skb->dev);
 
 	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
@@ -371,8 +372,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 	tp = tcp_sk(sk);
 	seq = ntohl(th->seq);
+	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
+	fastopen = tp->fastopen_rsk;
+	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 	if (sk->sk_state != TCP_LISTEN &&
-	    !between(seq, tp->snd_una, tp->snd_nxt)) {
+	    !between(seq, snd_una, tp->snd_nxt)) {
 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
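The widened check matters because a Fast Open child exists before the
three-way handshake completes, so an ICMPv6 error quoting the SYN-ACK's
sequence must be matched against the ISN recorded in the request rather
than tp->snd_una (see the XXX note in the hunk). A small userspace model
of the test, with between() carrying the same unsigned-wraparound
semantics as the kernel's include/net/tcp.h and invented sample values:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Same modular-arithmetic test as the kernel's between(): is seq1
	 * in [seq2, seq3], with wraparound handled by unsigned subtraction? */
	static bool between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
	{
		return seq3 - seq2 >= seq1 - seq2;
	}

	int main(void)
	{
		uint32_t snt_isn = 1000;	/* ISN sent on the Fast Open SYN-ACK */
		uint32_t snd_nxt = 1001;	/* one past the SYN-ACK */
		uint32_t icmp_seq = 1000;	/* seq echoed in the ICMPv6 payload */

		/* With snd_una taken from the request's snt_isn, the error for
		 * the original SYN-ACK is accepted rather than counted as
		 * LINUX_MIB_OUTOFWINDOWICMPS. */
		printf("in window: %d\n", between(icmp_seq, snt_isn, snd_nxt));
		return 0;
	}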
@@ -436,8 +440,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		goto out;
 
 	case TCP_SYN_SENT:
-	case TCP_SYN_RECV:  /* Cannot happen.
-			       It can, it SYNs are crossed. --ANK */
+	case TCP_SYN_RECV:
+		/* Only in fast or simultaneous open. If a fast open socket
+		 * is already accepted it is treated as a connected one below.
+		 */
+		if (fastopen && fastopen->sk == NULL)
+			break;
+
 		if (!sock_owned_by_user(sk)) {
 			sk->sk_err = err;
 			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */
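The new guard reads naturally once the states are spelled out: an ICMPv6
error in TCP_SYN_RECV is expected here only for simultaneous open or for
a Fast Open request whose child has not been accepted yet
(fastopen->sk == NULL); an accepted fastopen socket is treated as a
connected one further down. A compact model of that predicate, with
stand-in types rather than the kernel's:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Stand-in types, not the kernel definitions. */
	struct sock { int unused; };
	struct request_sock { struct sock *sk; };

	/* Mirror of the new test: defer error handling only while the Fast
	 * Open child exists as a request but has not been accept()ed yet. */
	static bool defer_icmp_error(const struct request_sock *fastopen)
	{
		return fastopen && fastopen->sk == NULL;
	}

	int main(void)
	{
		struct sock child = { 0 };
		struct request_sock pending = { .sk = NULL };
		struct request_sock accepted = { .sk = &child };

		printf("pending:  %d\n", defer_icmp_error(&pending));	/* 1: break */
		printf("accepted: %d\n", defer_icmp_error(&accepted));	/* 0: report */
		printf("no TFO:   %d\n", defer_icmp_error(NULL));	/* 0: report */
		return 0;
	}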
@@ -463,7 +472,8 @@ out:
 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct flowi6 *fl6,
 			      struct request_sock *req,
-			      u16 queue_mapping)
+			      u16 queue_mapping,
+			      struct tcp_fastopen_cookie *foc)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
 		goto done;
 
-	skb = tcp_make_synack(sk, dst, req, NULL);
+	skb = tcp_make_synack(sk, dst, req, foc);
 
 	if (skb) {
 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -498,7 +508,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
 	struct flowi6 fl6;
 	int res;
 
-	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
+	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
 	if (!res) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
@@ -917,7 +927,12 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
-	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
+	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+	 */
+	tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+			tcp_rsk(req)->rcv_nxt,
 			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
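The second argument change is the subtle one: a Fast Open SYN may carry
payload, so ACKing tcp_rsk(req)->rcv_isn + 1 would fail to cover the
data, while rcv_nxt already accounts for it. A toy model with invented
values:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t expected_rcv_nxt(uint32_t rcv_isn, uint32_t syn_data_len)
	{
		/* The SYN consumes one sequence number; any TFO payload on
		 * that SYN consumes more, which is why the hunk above ACKs
		 * rcv_nxt instead of rcv_isn + 1. */
		return rcv_isn + 1 + syn_data_len;
	}

	int main(void)
	{
		printf("%u\n", expected_rcv_nxt(5000, 100));	/* 5101, not 5001 */
		return 0;
	}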
@@ -969,8 +984,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
 	struct dst_entry *dst = NULL;
+	struct tcp_fastopen_cookie foc = { .len = -1 };
+	bool want_cookie = false, fastopen;
 	struct flowi6 fl6;
-	bool want_cookie = false;
+	int err;
 
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
@@ -1001,7 +1018,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 	tmp_opt.user_mss = tp->rx_opt.user_mss;
-	tcp_parse_options(skb, &tmp_opt, 0, NULL);
+	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
 
 	if (want_cookie && !tmp_opt.saw_tstamp)
 		tcp_clear_options(&tmp_opt);
@@ -1074,19 +1091,27 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		isn = tcp_v6_init_sequence(skb);
 	}
 have_isn:
-	tcp_rsk(req)->snt_isn = isn;
 
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_release;
 
-	if (tcp_v6_send_synack(sk, dst, &fl6, req,
-			       skb_get_queue_mapping(skb)) ||
-	    want_cookie)
+	if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
 		goto drop_and_free;
 
+	tcp_rsk(req)->snt_isn = isn;
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
-	tcp_rsk(req)->listener = NULL;
-	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+	tcp_openreq_init_rwin(req, sk, dst);
+	fastopen = !want_cookie &&
+		   tcp_try_fastopen(sk, skb, req, &foc, dst);
+	err = tcp_v6_send_synack(sk, dst, &fl6, req,
+				 skb_get_queue_mapping(skb), &foc);
+	if (!fastopen) {
+		if (err || want_cookie)
+			goto drop_and_free;
+
+		tcp_rsk(req)->listener = NULL;
+		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+	}
 	return 0;
 
 drop_and_release:
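This reordering is the core of the server-side change: the route is
resolved up front because tcp_openreq_init_rwin() and tcp_try_fastopen()
both need the dst, the SYN-ACK is now sent unconditionally, and only a
non-fastopen request is parked in the SYN queue to await the third ACK;
a fastopen request already has a live child socket, so the SYN-ACK's
outcome no longer decides its fate. A stubbed sketch of that control
flow (the helper bodies are invented; only the branching mirrors the
diff):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stub decisions standing in for the kernel calls. */
	static bool try_fastopen(void) { return true; }	/* tcp_try_fastopen() */
	static int send_synack(void)   { return 0; }	/* tcp_v6_send_synack() */

	static int conn_request_tail(bool want_cookie)
	{
		bool fastopen = !want_cookie && try_fastopen();
		int err = send_synack();	/* sent in both cases, with &foc */

		if (!fastopen) {
			if (err || want_cookie)
				return -1;	/* drop_and_free */
			/* park the request and wait for the handshake ACK */
			puts("reqsk hashed, awaiting third ACK");
		} else {
			/* child already created; nothing to queue here */
			puts("fastopen child created");
		}
		return 0;
	}

	int main(void)
	{
		return conn_request_tail(false);
	}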
@@ -1760,6 +1785,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	const struct inet_sock *inet = inet_sk(sp);
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
+	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
 
 	dest = &sp->sk_v6_daddr;
 	src = &sp->sk_v6_rcv_saddr;
@@ -1802,7 +1828,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd,
-		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
+		   sp->sk_state == TCP_LISTEN ?
+			(fastopenq ? fastopenq->max_qlen : 0) :
+			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
 		   );
 }
 