author		Octavian Purdila <octavian.purdila@intel.com>	2014-06-25 10:10:02 -0400
committer	David S. Miller <davem@davemloft.net>	2014-06-27 18:53:37 -0400
commit		1fb6f159fd21c640a28eb65fbd62ce8c9f6a777e (patch)
tree		b7ba1708058d6c13ee686da1fce92a7d71b84c0f
parent		695da14eb0af21129187ed3810e329b21262e45f (diff)
tcp: add tcp_conn_request
Create tcp_conn_request and remove most of the code from
tcp_v4_conn_request and tcp_v6_conn_request.

Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
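
After this change each address family keeps only its own early sanity check and delegates the shared SYN processing to tcp_conn_request(). Condensed from the net/ipv4/tcp_ipv4.c hunk below (the IPv6 path reduces the same way, passing tcp6_request_sock_ops and tcp_request_sock_ipv6_ops):

	int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
	{
		/* Never answer to SYNs send to broadcast or multicast */
		if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
			goto drop;

		return tcp_conn_request(&tcp_request_sock_ops,
					&tcp_request_sock_ipv4_ops, sk, skb);

	drop:
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		return 0;
	}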
-rw-r--r--	include/net/tcp.h	  3
-rw-r--r--	net/ipv4/tcp_input.c	148
-rw-r--r--	net/ipv4/tcp_ipv4.c	128
-rw-r--r--	net/ipv6/tcp_ipv6.c	120
4 files changed, 155 insertions(+), 244 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cec6e2cf0610..0d5389aecf18 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1574,6 +1574,9 @@ void tcp4_proc_exit(void);
 #endif
 
 int tcp_rtx_synack(struct sock *sk, struct request_sock *req);
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+		     const struct tcp_request_sock_ops *af_ops,
+		     struct sock *sk, struct sk_buff *skb);
 
 /* TCP af-specific functions */
 struct tcp_sock_af_ops {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b5c23756965a..97e48d60c4e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5877,3 +5877,151 @@ discard:
 	return 0;
 }
 EXPORT_SYMBOL(tcp_rcv_state_process);
+
+static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
+{
+	struct inet_request_sock *ireq = inet_rsk(req);
+
+	if (family == AF_INET)
+		LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
+			       &ireq->ir_rmt_addr, port);
+	else
+		LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI6/%u\n"),
+			       &ireq->ir_v6_rmt_addr, port);
+}
+
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+		     const struct tcp_request_sock_ops *af_ops,
+		     struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_options_received tmp_opt;
+	struct request_sock *req;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct dst_entry *dst = NULL;
+	__u32 isn = TCP_SKB_CB(skb)->when;
+	bool want_cookie = false, fastopen;
+	struct flowi fl;
+	struct tcp_fastopen_cookie foc = { .len = -1 };
+	int err;
+
+
+	/* TW buckets are converted to open requests without
+	 * limitations, they conserve resources and peer is
+	 * evidently real one.
+	 */
+	if ((sysctl_tcp_syncookies == 2 ||
+	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
+		want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
+		if (!want_cookie)
+			goto drop;
+	}
+
+
+	/* Accept backlog is full. If we have already queued enough
+	 * of warm entries in syn queue, drop request. It is better than
+	 * clogging syn queue with openreqs with exponentially increasing
+	 * timeout.
+	 */
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+		goto drop;
+	}
+
+	req = inet_reqsk_alloc(rsk_ops);
+	if (!req)
+		goto drop;
+
+	tcp_rsk(req)->af_specific = af_ops;
+
+	tcp_clear_options(&tmp_opt);
+	tmp_opt.mss_clamp = af_ops->mss_clamp;
+	tmp_opt.user_mss = tp->rx_opt.user_mss;
+	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+
+	if (want_cookie && !tmp_opt.saw_tstamp)
+		tcp_clear_options(&tmp_opt);
+
+	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
+	tcp_openreq_init(req, &tmp_opt, skb, sk);
+
+	af_ops->init_req(req, sk, skb);
+
+	if (security_inet_conn_request(sk, skb, req))
+		goto drop_and_free;
+
+	if (!want_cookie || tmp_opt.tstamp_ok)
+		TCP_ECN_create_request(req, skb, sock_net(sk));
+
+	if (want_cookie) {
+		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+		req->cookie_ts = tmp_opt.tstamp_ok;
+	} else if (!isn) {
+		/* VJ's idea. We save last timestamp seen
+		 * from the destination in peer table, when entering
+		 * state TIME-WAIT, and check against it before
+		 * accepting new connection request.
+		 *
+		 * If "isn" is not zero, this request hit alive
+		 * timewait bucket, so that all the necessary checks
+		 * are made in the function processing timewait state.
+		 */
+		if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
+			bool strict;
+
+			dst = af_ops->route_req(sk, &fl, req, &strict);
+			if (dst && strict &&
+			    !tcp_peer_is_proven(req, dst, true)) {
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+				goto drop_and_release;
+			}
+		}
+		/* Kill the following clause, if you dislike this way. */
+		else if (!sysctl_tcp_syncookies &&
+			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+			  (sysctl_max_syn_backlog >> 2)) &&
+			 !tcp_peer_is_proven(req, dst, false)) {
+			/* Without syncookies last quarter of
+			 * backlog is filled with destinations,
+			 * proven to be alive.
+			 * It means that we continue to communicate
+			 * to destinations, already remembered
+			 * to the moment of synflood.
+			 */
+			pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
+				    rsk_ops->family);
+			goto drop_and_release;
+		}
+
+		isn = af_ops->init_seq(skb);
+	}
+	if (!dst) {
+		dst = af_ops->route_req(sk, &fl, req, NULL);
+		if (!dst)
+			goto drop_and_free;
+	}
+
+	tcp_rsk(req)->snt_isn = isn;
+	tcp_openreq_init_rwin(req, sk, dst);
+	fastopen = !want_cookie &&
+		   tcp_try_fastopen(sk, skb, req, &foc, dst);
+	err = af_ops->send_synack(sk, dst, &fl, req,
+				  skb_get_queue_mapping(skb), &foc);
+	if (!fastopen) {
+		if (err || want_cookie)
+			goto drop_and_free;
+
+		tcp_rsk(req)->listener = NULL;
+		af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+	}
+
+	return 0;
+
+drop_and_release:
+	dst_release(dst);
+drop_and_free:
+	reqsk_free(req);
+drop:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	return 0;
+}
+EXPORT_SYMBOL(tcp_conn_request);
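
tcp_conn_request() stays address-family agnostic by routing every family-specific step through the tcp_request_sock_ops callbacks passed in by the caller. As an illustration only, the subset of that structure exercised above looks roughly like the sketch below; the member names come from the call sites in this function, but the parameter lists are inferred from those call sites rather than copied from include/net/tcp.h, so treat the signatures as approximate:

	/* Illustrative subset, inferred from the call sites in tcp_conn_request();
	 * the authoritative definition lives in include/net/tcp.h.
	 */
	struct tcp_request_sock_ops {
		u16	mss_clamp;		/* per-family MSS clamp (tmp_opt.mss_clamp) */
		void	(*init_req)(struct request_sock *req, struct sock *sk,
				    struct sk_buff *skb);
		struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
					       const struct request_sock *req,
					       bool *strict);
		__u32	(*init_seq)(const struct sk_buff *skb);
		int	(*send_synack)(struct sock *sk, struct dst_entry *dst,
				       struct flowi *fl, struct request_sock *req,
				       u16 queue_mapping, struct tcp_fastopen_cookie *foc);
		void	(*queue_hash_add)(struct sock *sk, struct request_sock *req,
					  const unsigned long timeout);
		/* plus a syncookie ISN hook reached via cookie_init_sequence() */
	};

The request_sock_ops argument supplies the slab_name and family fields used above for the syn-flood message and pr_drop_req().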
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 845c39de97ab..5dfebd2f2e38 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1282,137 +1282,13 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcp_options_received tmp_opt;
-	struct request_sock *req;
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = NULL;
-	__be32 saddr = ip_hdr(skb)->saddr;
-	__u32 isn = TCP_SKB_CB(skb)->when;
-	bool want_cookie = false, fastopen;
-	struct flowi4 fl4;
-	struct tcp_fastopen_cookie foc = { .len = -1 };
-	const struct tcp_request_sock_ops *af_ops;
-	int err;
-
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
 		goto drop;
 
-	/* TW buckets are converted to open requests without
-	 * limitations, they conserve resources and peer is
-	 * evidently real one.
-	 */
-	if ((sysctl_tcp_syncookies == 2 ||
-	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
-		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
-		if (!want_cookie)
-			goto drop;
-	}
-
-	/* Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-		goto drop;
-	}
-
-	req = inet_reqsk_alloc(&tcp_request_sock_ops);
-	if (!req)
-		goto drop;
-
-	af_ops = tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
-
-	tcp_clear_options(&tmp_opt);
-	tmp_opt.mss_clamp = af_ops->mss_clamp;
-	tmp_opt.user_mss = tp->rx_opt.user_mss;
-	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
-
-	if (want_cookie && !tmp_opt.saw_tstamp)
-		tcp_clear_options(&tmp_opt);
-
-	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-	tcp_openreq_init(req, &tmp_opt, skb, sk);
-
-	af_ops->init_req(req, sk, skb);
-
-	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_free;
+	return tcp_conn_request(&tcp_request_sock_ops,
+				&tcp_request_sock_ipv4_ops, sk, skb);
 
-	if (!want_cookie || tmp_opt.tstamp_ok)
-		TCP_ECN_create_request(req, skb, sock_net(sk));
-
-	if (want_cookie) {
-		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
-		req->cookie_ts = tmp_opt.tstamp_ok;
-	} else if (!isn) {
-		/* VJ's idea. We save last timestamp seen
-		 * from the destination in peer table, when entering
-		 * state TIME-WAIT, and check against it before
-		 * accepting new connection request.
-		 *
-		 * If "isn" is not zero, this request hit alive
-		 * timewait bucket, so that all the necessary checks
-		 * are made in the function processing timewait state.
-		 */
-		if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
-			bool strict;
-
-			dst = af_ops->route_req(sk, (struct flowi *)&fl4, req,
-						&strict);
-			if (dst && strict &&
-			    !tcp_peer_is_proven(req, dst, true)) {
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-				goto drop_and_release;
-			}
-		}
-		/* Kill the following clause, if you dislike this way. */
-		else if (!sysctl_tcp_syncookies &&
-			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-			  (sysctl_max_syn_backlog >> 2)) &&
-			 !tcp_peer_is_proven(req, dst, false)) {
-			/* Without syncookies last quarter of
-			 * backlog is filled with destinations,
-			 * proven to be alive.
-			 * It means that we continue to communicate
-			 * to destinations, already remembered
-			 * to the moment of synflood.
-			 */
-			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
-				       &saddr, ntohs(tcp_hdr(skb)->source));
-			goto drop_and_release;
-		}
-
-		isn = af_ops->init_seq(skb);
-	}
-	if (!dst) {
-		dst = af_ops->route_req(sk, (struct flowi *)&fl4, req, NULL);
-		if (!dst)
-			goto drop_and_free;
-	}
-
-	tcp_rsk(req)->snt_isn = isn;
-	tcp_openreq_init_rwin(req, sk, dst);
-	fastopen = !want_cookie &&
-		   tcp_try_fastopen(sk, skb, req, &foc, dst);
-	err = af_ops->send_synack(sk, dst, NULL, req,
-				  skb_get_queue_mapping(skb), &foc);
-	if (!fastopen) {
-		if (err || want_cookie)
-			goto drop_and_free;
-
-		tcp_rsk(req)->listener = NULL;
-		af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-	}
-
-	return 0;
-
-drop_and_release:
-	dst_release(dst);
-drop_and_free:
-	reqsk_free(req);
 drop:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8232bc7423c6..bc24ee21339a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1008,133 +1008,17 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 	return sk;
 }
 
-/* FIXME: this is substantially similar to the ipv4 code.
- * Can some kind of merge be done? -- erics
- */
 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcp_options_received tmp_opt;
-	struct request_sock *req;
-	struct inet_request_sock *ireq;
-	struct tcp_sock *tp = tcp_sk(sk);
-	__u32 isn = TCP_SKB_CB(skb)->when;
-	struct dst_entry *dst = NULL;
-	struct tcp_fastopen_cookie foc = { .len = -1 };
-	bool want_cookie = false, fastopen;
-	struct flowi6 fl6;
-	const struct tcp_request_sock_ops *af_ops;
-	int err;
-
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
 
 	if (!ipv6_unicast_destination(skb))
 		goto drop;
 
-	if ((sysctl_tcp_syncookies == 2 ||
-	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
-		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
-		if (!want_cookie)
-			goto drop;
-	}
+	return tcp_conn_request(&tcp6_request_sock_ops,
+				&tcp_request_sock_ipv6_ops, sk, skb);
 
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-		goto drop;
-	}
-
-	req = inet_reqsk_alloc(&tcp6_request_sock_ops);
-	if (req == NULL)
-		goto drop;
-
-	af_ops = tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
-
-	tcp_clear_options(&tmp_opt);
-	tmp_opt.mss_clamp = af_ops->mss_clamp;
-	tmp_opt.user_mss = tp->rx_opt.user_mss;
-	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
-
-	if (want_cookie && !tmp_opt.saw_tstamp)
-		tcp_clear_options(&tmp_opt);
-
-	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-	tcp_openreq_init(req, &tmp_opt, skb, sk);
-
-	ireq = inet_rsk(req);
-	af_ops->init_req(req, sk, skb);
-
-	if (security_inet_conn_request(sk, skb, req))
-		goto drop_and_release;
-
-	if (!want_cookie || tmp_opt.tstamp_ok)
-		TCP_ECN_create_request(req, skb, sock_net(sk));
-
-	if (want_cookie) {
-		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
-		req->cookie_ts = tmp_opt.tstamp_ok;
-	} else if (!isn) {
-		/* VJ's idea. We save last timestamp seen
-		 * from the destination in peer table, when entering
-		 * state TIME-WAIT, and check against it before
-		 * accepting new connection request.
-		 *
-		 * If "isn" is not zero, this request hit alive
-		 * timewait bucket, so that all the necessary checks
-		 * are made in the function processing timewait state.
-		 */
-		if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
-			dst = af_ops->route_req(sk, (struct flowi *)&fl6, req,
-						NULL);
-			if (dst && !tcp_peer_is_proven(req, dst, true)) {
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-				goto drop_and_release;
-			}
-		}
-		/* Kill the following clause, if you dislike this way. */
-		else if (!sysctl_tcp_syncookies &&
-			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-			  (sysctl_max_syn_backlog >> 2)) &&
-			 !tcp_peer_is_proven(req, dst, false)) {
-			/* Without syncookies last quarter of
-			 * backlog is filled with destinations,
-			 * proven to be alive.
-			 * It means that we continue to communicate
-			 * to destinations, already remembered
-			 * to the moment of synflood.
-			 */
-			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
-				       &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
-			goto drop_and_release;
-		}
-
-		isn = af_ops->init_seq(skb);
-	}
-
-	if (!dst) {
-		dst = af_ops->route_req(sk, (struct flowi *)&fl6, req, NULL);
-		if (!dst)
-			goto drop_and_free;
-	}
-
-	tcp_rsk(req)->snt_isn = isn;
-	tcp_openreq_init_rwin(req, sk, dst);
-	fastopen = !want_cookie &&
-		   tcp_try_fastopen(sk, skb, req, &foc, dst);
-	err = af_ops->send_synack(sk, dst, (struct flowi *)&fl6, req,
-				  skb_get_queue_mapping(skb), &foc);
-	if (!fastopen) {
-		if (err || want_cookie)
-			goto drop_and_free;
-
-		tcp_rsk(req)->listener = NULL;
-		af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-	}
-	return 0;
-
-drop_and_release:
-	dst_release(dst);
-drop_and_free:
-	reqsk_free(req);
 drop:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0; /* don't send reset */