Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
 -rw-r--r--   net/ipv6/tcp_ipv6.c | 237
 1 file changed, 76 insertions(+), 161 deletions(-)

diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 229239ad96b1..29964c3d363c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -198,6 +198,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	sk->sk_v6_daddr = usin->sin6_addr;
 	np->flow_label = fl6.flowlabel;
 
+	ip6_set_txhash(sk);
+
 	/*
 	 *	TCP over IPv4
 	 */
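
This hunk makes tcp_v6_connect() record a transmit flow hash on the socket as
soon as the 4-tuple is known; the hunk at new line 1150 below does the same
for child sockets created by tcp_v6_syn_recv_sock(). ip6_set_txhash() itself
lives in the IPv6 core, not in this file. A sketch of the idea, assuming the
flow_keys/flow_hash_from_keys API of this kernel generation (the sketch name
is illustrative, not the kernel's exact code):

/* Sketch: hash the connection 4-tuple once and cache it in
 * sk->sk_txhash, so every skb transmitted on this socket can reuse
 * the same hash (ECMP link selection, XPS queue selection, etc.).
 */
static inline void set_txhash_sketch(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct flow_keys keys;

	keys.src = (__force __be32)ipv6_addr_hash(&sk->sk_v6_rcv_saddr);
	keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr);
	keys.port16[0] = inet->inet_sport;
	keys.port16[1] = inet->inet_dport;

	sk->sk_txhash = flow_hash_from_keys(&keys);
}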
@@ -470,13 +472,14 @@ out:
 
 
 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
-			      struct flowi6 *fl6,
+			      struct flowi *fl,
 			      struct request_sock *req,
 			      u16 queue_mapping,
 			      struct tcp_fastopen_cookie *foc)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct flowi6 *fl6 = &fl->u.ip6;
 	struct sk_buff *skb;
 	int err = -ENOMEM;
 
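
tcp_v6_send_synack() switches from struct flowi6 to the family-neutral struct
flowi so its address can be stored in a function-pointer table shared with the
IPv4 code (see .send_synack below); the v6 flow is recovered from the union
member. For reference, struct flowi is approximately this tagged union over
the per-family flow keys (from include/net/flow.h of this era):

struct flowi {
	union {
		struct flowi_common	__fl_common;
		struct flowi4		ip4;	/* IPv4 flow key   */
		struct flowi6		ip6;	/* IPv6 flow key   */
		struct flowidn		dn;	/* DECnet flow key */
	} u;
};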
@@ -503,18 +506,6 @@ done:
 	return err;
 }
 
-static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
-{
-	struct flowi6 fl6;
-	int res;
-
-	res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
-	if (!res) {
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-	}
-	return res;
-}
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
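
tcp_v6_rtx_synack() disappears because it differed from its IPv4 twin only in
which send_synack function it called; the ops table below now points
.rtx_syn_ack at a shared tcp_rtx_synack() in the protocol-independent TCP
code. Presumably that helper mirrors the deleted body, reaching the per-family
transmitter through tcp_rsk(req)->af_specific -- a sketch under that
assumption:

/* Sketch of the shared replacement: same MIB bookkeeping as the
 * deleted function, but the SYN-ACK goes out via the per-family
 * send_synack hook instead of a hard-coded tcp_v6_send_synack().
 */
int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
{
	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
	struct flowi fl;
	int res;

	res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
	if (!res) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
	}
	return res;
}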
@@ -676,7 +667,8 @@ clear_hash_noput:
 	return 1;
 }
 
-static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static int __tcp_v6_inbound_md5_hash(struct sock *sk,
+				     const struct sk_buff *skb)
 {
 	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
@@ -716,24 +708,80 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 	}
 	return 0;
 }
+
+static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+{
+	int ret;
+
+	rcu_read_lock();
+	ret = __tcp_v6_inbound_md5_hash(sk, skb);
+	rcu_read_unlock();
+
+	return ret;
+}
+
 #endif
 
+static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
+			    struct sk_buff *skb)
+{
+	struct inet_request_sock *ireq = inet_rsk(req);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+
+	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+
+	ireq->ir_iif = sk->sk_bound_dev_if;
+
+	/* So that link locals have meaning */
+	if (!sk->sk_bound_dev_if &&
+	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+		ireq->ir_iif = inet6_iif(skb);
+
+	if (!TCP_SKB_CB(skb)->when &&
+	    (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
+	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
+	     np->rxopt.bits.rxohlim || np->repflow)) {
+		atomic_inc(&skb->users);
+		ireq->pktopts = skb;
+	}
+}
+
+static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
+					  const struct request_sock *req,
+					  bool *strict)
+{
+	if (strict)
+		*strict = true;
+	return inet6_csk_route_req(sk, &fl->u.ip6, req);
+}
+
 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 	.family		=	AF_INET6,
 	.obj_size	=	sizeof(struct tcp6_request_sock),
-	.rtx_syn_ack	=	tcp_v6_rtx_synack,
+	.rtx_syn_ack	=	tcp_rtx_synack,
 	.send_ack	=	tcp_v6_reqsk_send_ack,
 	.destructor	=	tcp_v6_reqsk_destructor,
 	.send_reset	=	tcp_v6_send_reset,
 	.syn_ack_timeout =	tcp_syn_ack_timeout,
 };
 
-#ifdef CONFIG_TCP_MD5SIG
 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
+				sizeof(struct ipv6hdr),
+#ifdef CONFIG_TCP_MD5SIG
 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
-};
 #endif
+	.init_req	=	tcp_v6_init_req,
+#ifdef CONFIG_SYN_COOKIES
+	.cookie_init_seq =	cookie_v6_init_sequence,
+#endif
+	.route_req	=	tcp_v6_route_req,
+	.init_seq	=	tcp_v6_init_sequence,
+	.send_synack	=	tcp_v6_send_synack,
+	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
+};
 
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 				 u32 tsval, u32 tsecr, int oif,
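
tcp_request_sock_ipv6_ops stops being an MD5-only table and becomes the full
set of address-family hooks for passive open: MSS clamping, request and route
initialization, ISN generation (with and without syncookies), SYN-ACK
transmission and request-queue insertion. Note that the pktopts snapshot in
tcp_v6_init_req() is gated on !TCP_SKB_CB(skb)->when, the same "not a
timewait recycle" condition the old open-coded if (!isn) expressed. The point
of the indirection is that a family-neutral tcp_conn_request() can now drive
the whole sequence, roughly like this (a sketch of the calling pattern, not
the literal tcp_conn_request() body):

/* Sketch: how protocol-independent code consumes the hook table above. */
static int conn_request_sketch(const struct tcp_request_sock_ops *af_ops,
			       struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req, struct flowi *fl)
{
	struct dst_entry *dst;
	bool strict;

	af_ops->init_req(req, sk, skb);		       /* tcp_v6_init_req */

	dst = af_ops->route_req(sk, fl, req, &strict); /* tcp_v6_route_req */
	if (!dst)
		return -1;

	tcp_rsk(req)->snt_isn = af_ops->init_seq(skb); /* tcp_v6_init_sequence */

	if (af_ops->send_synack(sk, dst, fl, req,      /* tcp_v6_send_synack */
				skb_get_queue_mapping(skb), NULL))
		return -1;

	af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;
}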
@@ -973,153 +1021,17 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 	return sk;
 }
 
-/* FIXME: this is substantially similar to the ipv4 code.
- * Can some kind of merge be done? -- erics
- */
 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcp_options_received tmp_opt;
-	struct request_sock *req;
-	struct inet_request_sock *ireq;
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
-	__u32 isn = TCP_SKB_CB(skb)->when;
-	struct dst_entry *dst = NULL;
-	struct tcp_fastopen_cookie foc = { .len = -1 };
-	bool want_cookie = false, fastopen;
-	struct flowi6 fl6;
-	int err;
-
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
 
 	if (!ipv6_unicast_destination(skb))
 		goto drop;
 
-	if ((sysctl_tcp_syncookies == 2 ||
-	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
-		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
-		if (!want_cookie)
-			goto drop;
-	}
-
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-		goto drop;
-	}
-
-	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
-	if (req == NULL)
-		goto drop;
-
-#ifdef CONFIG_TCP_MD5SIG
-	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
-#endif
-
-	tcp_clear_options(&tmp_opt);
-	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
-	tmp_opt.user_mss = tp->rx_opt.user_mss;
-	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
-
-	if (want_cookie && !tmp_opt.saw_tstamp)
-		tcp_clear_options(&tmp_opt);
-
-	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-	tcp_openreq_init(req, &tmp_opt, skb);
-
-	ireq = inet_rsk(req);
-	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
-	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
-	if (!want_cookie || tmp_opt.tstamp_ok)
-		TCP_ECN_create_request(req, skb, sock_net(sk));
-
-	ireq->ir_iif = sk->sk_bound_dev_if;
-	ireq->ir_mark = inet_request_mark(sk, skb);
-
-	/* So that link locals have meaning */
-	if (!sk->sk_bound_dev_if &&
-	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		ireq->ir_iif = inet6_iif(skb);
-
-	if (!isn) {
-		if (ipv6_opt_accepted(sk, skb) ||
-		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
-		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
-		    np->repflow) {
-			atomic_inc(&skb->users);
-			ireq->pktopts = skb;
-		}
+	return tcp_conn_request(&tcp6_request_sock_ops,
+				&tcp_request_sock_ipv6_ops, sk, skb);
 
-		if (want_cookie) {
-			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
-			req->cookie_ts = tmp_opt.tstamp_ok;
-			goto have_isn;
-		}
-
-		/* VJ's idea. We save last timestamp seen
-		 * from the destination in peer table, when entering
-		 * state TIME-WAIT, and check against it before
-		 * accepting new connection request.
-		 *
-		 * If "isn" is not zero, this request hit alive
-		 * timewait bucket, so that all the necessary checks
-		 * are made in the function processing timewait state.
-		 */
-		if (tmp_opt.saw_tstamp &&
-		    tcp_death_row.sysctl_tw_recycle &&
-		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
-			if (!tcp_peer_is_proven(req, dst, true)) {
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-				goto drop_and_release;
-			}
-		}
-		/* Kill the following clause, if you dislike this way. */
-		else if (!sysctl_tcp_syncookies &&
-			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-			  (sysctl_max_syn_backlog >> 2)) &&
-			 !tcp_peer_is_proven(req, dst, false)) {
-			/* Without syncookies last quarter of
-			 * backlog is filled with destinations,
-			 * proven to be alive.
-			 * It means that we continue to communicate
-			 * to destinations, already remembered
-			 * to the moment of synflood.
-			 */
-			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
-				       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
-			goto drop_and_release;
-		}
-
-		isn = tcp_v6_init_sequence(skb);
-	}
-have_isn:
-
-	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_release;
-
-	if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
-		goto drop_and_free;
-
-	tcp_rsk(req)->snt_isn = isn;
-	tcp_rsk(req)->snt_synack = tcp_time_stamp;
-	tcp_openreq_init_rwin(req, sk, dst);
-	fastopen = !want_cookie &&
-		   tcp_try_fastopen(sk, skb, req, &foc, dst);
-	err = tcp_v6_send_synack(sk, dst, &fl6, req,
-				 skb_get_queue_mapping(skb), &foc);
-	if (!fastopen) {
-		if (err || want_cookie)
-			goto drop_and_free;
-
-		tcp_rsk(req)->listener = NULL;
-		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-	}
-	return 0;
-
-drop_and_release:
-	dst_release(dst);
-drop_and_free:
-	reqsk_free(req);
 drop:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0; /* don't send reset */
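
Everything deleted here was the IPv6 copy of logic the old FIXME already
called "substantially similar to the ipv4 code": backlog and syncookie
policing, option parsing, ECN and Fast Open handling, the tw_recycle/PAWS
checks and the SYN-ACK send. All of that moves into the shared
tcp_conn_request() in the protocol-independent TCP code, parameterized by the
two ops tables above, leaving only the genuinely v6-specific checks behind.
Assembled from the right-hand side of this hunk, the function now reads:

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* v4-mapped sockets take the IPv4 path entirely */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/* family-specific behaviour is supplied by the two ops tables */
	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}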
@@ -1235,6 +1147,8 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
 	newsk->sk_bound_dev_if = ireq->ir_iif;
 
+	ip6_set_txhash(newsk);
+
 	/* Now IPv6 options...
 
 	   First: no IPv4 options.
@@ -1346,11 +1260,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_do_rcv(sk, skb);
 
-#ifdef CONFIG_TCP_MD5SIG
-	if (tcp_v6_inbound_md5_hash(sk, skb))
-		goto discard;
-#endif
-
 	if (sk_filter(sk, skb))
 		goto discard;
 
@@ -1523,6 +1432,11 @@ process:
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tcp_v6_inbound_md5_hash(sk, skb))
+		goto discard_and_relse;
+#endif
+
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
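
This pairs with the earlier split of the MD5 check into
__tcp_v6_inbound_md5_hash() plus a locking wrapper: the check leaves
tcp_v6_do_rcv(), which runs with the socket owned, and moves up into
tcp_v6_rcv() before the socket is locked, so segments failing the MD5 check
are dropped before they can be queued to the backlog. Because the socket lock
no longer covers the key lookup at this point, the wrapper added at new line
712 above supplies the RCU read-side section the lookup needs; reproduced here
with comments:

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();	/* md5 keys are RCU-protected and we do
				 * not own the socket here yet */
	ret = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}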
@@ -1681,6 +1595,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
 	.compat_setsockopt = compat_ipv6_setsockopt,
 	.compat_getsockopt = compat_ipv6_getsockopt,
 #endif
+	.mtu_reduced	   = tcp_v6_mtu_reduced,
 };
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1711,6 +1626,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
 	.compat_setsockopt = compat_ipv6_setsockopt,
 	.compat_getsockopt = compat_ipv6_getsockopt,
 #endif
+	.mtu_reduced	   = tcp_v4_mtu_reduced,
 };
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -1950,7 +1866,6 @@ struct proto tcpv6_prot = {
 	.sendpage	   = tcp_sendpage,
 	.backlog_rcv	   = tcp_v6_do_rcv,
 	.release_cb	   = tcp_release_cb,
-	.mtu_reduced	   = tcp_v6_mtu_reduced,
 	.hash		   = tcp_v6_hash,
 	.unhash		   = inet_unhash,
 	.get_port	   = inet_csk_get_port,
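
The last three hunks move mtu_reduced out of struct proto and into struct
inet_connection_sock_af_ops. An IPv6 socket that ends up carrying a v4-mapped
connection swaps its icsk_af_ops to ipv6_mapped, so the dispatch now selects
tcp_v4_mtu_reduced for mapped sockets automatically, which the single
tcpv6_prot-wide pointer could not express. The protocol-independent caller
becomes an indirect call through the connection's af_ops, roughly like this
(a sketch of the assumed caller in tcp_release_cb(), not the literal code):

/* Sketch: deferred MTU-reduction handling after the move. */
static void release_cb_sketch(struct sock *sk, unsigned long flags)
{
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED))
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
}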