author    Daniel Lee <longinus00@gmail.com>    2014-05-11 23:22:13 -0400
committer David S. Miller <davem@davemloft.net>    2014-05-13 17:53:03 -0400
commit    3a19ce0eec32667b835d8dc887002019fc6b3a02 (patch)
tree      a5e44c9427f0c6a5cb0e6252e3ccbd5ef4f7a50e /net/ipv6
parent    0a672f74131dd682087dfd5f45bf61f95804772e (diff)
tcp: IPv6 support for fastopen server
After all the preparatory work, supporting IPv6 in Fast Open is now easy. We pretty much just mirror the v4 code. The only difference is how we generate the Fast Open cookie for IPv6 sockets: since the Fast Open cookie is 128 bits and we use AES-128, we use CBC-MAC to encrypt both the source and destination IPv6 addresses, since the cookie is a MAC tag.

Signed-off-by: Daniel Lee <longinus00@gmail.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Jerry Chu <hkchu@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
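For illustration, here is a minimal userspace sketch of the cookie construction described above: a two-block CBC-MAC under AES-128 over the source and destination IPv6 addresses, each of which fills exactly one AES block. This is only a sketch of the scheme named in the message, not the kernel code itself; it uses OpenSSL's low-level AES primitives as a stand-in for the kernel crypto API, and the all-zero key and 2001:db8:: addresses are made-up example values.

/*
 * Hedged sketch: CBC-MAC over (saddr, daddr) with AES-128, i.e. the
 * 128-bit tag  tag = E_K(E_K(saddr) XOR daddr)  (zero IV).
 * Userspace illustration only; key and addresses are examples.
 *
 * Build: gcc cbcmac_v6.c -lcrypto
 */
#include <stdio.h>
#include <arpa/inet.h>
#include <openssl/aes.h>

static void tfo_cookie_v6(const unsigned char key[16],
			  const struct in6_addr *saddr,
			  const struct in6_addr *daddr,
			  unsigned char cookie[16])
{
	AES_KEY aes;
	unsigned char block[16];
	int i;

	AES_set_encrypt_key(key, 128, &aes);

	/* Block 1: encrypt the 128-bit source address (implicit zero IV). */
	AES_encrypt((const unsigned char *)saddr, block, &aes);

	/* Block 2: XOR in the destination address, encrypt again. */
	for (i = 0; i < 16; i++)
		block[i] ^= ((const unsigned char *)daddr)[i];
	AES_encrypt(block, cookie, &aes);
}

int main(void)
{
	unsigned char key[16] = { 0 };	/* demo key only */
	unsigned char cookie[16];
	struct in6_addr saddr, daddr;
	int i;

	inet_pton(AF_INET6, "2001:db8::1", &saddr);	/* example addresses */
	inet_pton(AF_INET6, "2001:db8::2", &daddr);

	tfo_cookie_v6(key, &saddr, &daddr, cookie);
	for (i = 0; i < 16; i++)
		printf("%02x", cookie[i]);
	printf("\n");
	return 0;
}

The exact key management and block ordering used by the kernel live in the Fast Open code proper (the cookie generation helpers shared with v4); the point here is only that two AES blocks suffice to cover both 128-bit addresses.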
Diffstat (limited to 'net/ipv6')
 net/ipv6/tcp_ipv6.c | 40 ++++++++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a7a62ce12b3f..3a267bf14f2f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -472,7 +472,8 @@ out:
 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct flowi6 *fl6,
 			      struct request_sock *req,
-			      u16 queue_mapping)
+			      u16 queue_mapping,
+			      struct tcp_fastopen_cookie *foc)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -483,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
 		goto done;
 
-	skb = tcp_make_synack(sk, dst, req, NULL);
+	skb = tcp_make_synack(sk, dst, req, foc);
 
 	if (skb) {
 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -507,7 +508,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
 	struct flowi6 fl6;
 	int res;
 
-	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
+	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
 	if (!res) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
@@ -926,7 +927,12 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
-	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
+	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+	 */
+	tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+			tcp_rsk(req)->rcv_nxt,
 			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
@@ -978,8 +984,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
 	struct dst_entry *dst = NULL;
+	struct tcp_fastopen_cookie foc = { .len = -1 };
+	bool want_cookie = false, fastopen;
 	struct flowi6 fl6;
-	bool want_cookie = false;
+	int err;
 
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
@@ -1010,7 +1018,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 	tmp_opt.user_mss = tp->rx_opt.user_mss;
-	tcp_parse_options(skb, &tmp_opt, 0, NULL);
+	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
 
 	if (want_cookie && !tmp_opt.saw_tstamp)
 		tcp_clear_options(&tmp_opt);
@@ -1083,19 +1091,27 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		isn = tcp_v6_init_sequence(skb);
 	}
 have_isn:
-	tcp_rsk(req)->snt_isn = isn;
 
 	if (security_inet_conn_request(sk, skb, req))
 		goto drop_and_release;
 
-	if (tcp_v6_send_synack(sk, dst, &fl6, req,
-			       skb_get_queue_mapping(skb)) ||
-	    want_cookie)
+	if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
 		goto drop_and_free;
 
+	tcp_rsk(req)->snt_isn = isn;
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
-	tcp_rsk(req)->listener = NULL;
-	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+	tcp_openreq_init_rwin(req, sk, dst);
+	fastopen = !want_cookie &&
+		   tcp_try_fastopen(sk, skb, req, &foc, dst);
+	err = tcp_v6_send_synack(sk, dst, &fl6, req,
+				 skb_get_queue_mapping(skb), &foc);
+	if (!fastopen) {
+		if (err || want_cookie)
+			goto drop_and_free;
+
+		tcp_rsk(req)->listener = NULL;
+		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+	}
 	return 0;
 
 drop_and_release:
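With this change in place, an IPv6 listener can serve Fast Open connections the same way a v4 one does. Below is a minimal sketch of a server opting in, assuming a kernel carrying this patch and the net.ipv4.tcp_fastopen sysctl set to permit server-side TFO (e.g. bit 2 set); the port number and queue length are arbitrary example values, not anything mandated by the patch.

/*
 * Hedged sketch: an AF_INET6 listener enabling TCP Fast Open.
 * Assumes TCP_FASTOPEN is visible via <netinet/tcp.h> on this libc.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	struct sockaddr_in6 addr;
	int qlen = 16;	/* max pending SYN-data (TFO) requests */

	memset(&addr, 0, sizeof(addr));
	addr.sin6_family = AF_INET6;
	addr.sin6_addr = in6addr_any;
	addr.sin6_port = htons(8080);	/* example port */

	if (fd < 0 ||
	    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0 ||
	    listen(fd, 128) < 0) {
		perror("tfo listener setup");
		return 1;
	}

	/* accept() behaves as usual; with a valid cookie, data carried in
	 * the SYN is readable as soon as the child socket is returned. */
	for (;;) {
		char buf[1024];
		int c = accept(fd, NULL, NULL);
		ssize_t n;

		if (c < 0)
			continue;
		n = read(c, buf, sizeof(buf));
		if (n > 0)
			printf("got %zd bytes\n", n);
		close(c);
	}
}

A client exercises the same feature by calling sendto() with MSG_FASTOPEN on an unconnected socket, which carries the request data in the SYN when a cookie is available.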