diff options
author | Octavian Purdila <octavian.purdila@intel.com> | 2014-06-25 10:10:02 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-06-27 18:53:37 -0400 |
commit | 1fb6f159fd21c640a28eb65fbd62ce8c9f6a777e (patch) | |
tree | b7ba1708058d6c13ee686da1fce92a7d71b84c0f /net/ipv4/tcp_ipv4.c | |
parent | 695da14eb0af21129187ed3810e329b21262e45f (diff) |
tcp: add tcp_conn_request
Create tcp_conn_request and remove most of the code from
tcp_v4_conn_request and tcp_v6_conn_request.
Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 128 |
1 file changed, 2 insertions, 126 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 845c39de97ab..5dfebd2f2e38 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1282,137 +1282,13 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { | |||
1282 | 1282 | ||
1283 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | 1283 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) |
1284 | { | 1284 | { |
1285 | struct tcp_options_received tmp_opt; | ||
1286 | struct request_sock *req; | ||
1287 | struct tcp_sock *tp = tcp_sk(sk); | ||
1288 | struct dst_entry *dst = NULL; | ||
1289 | __be32 saddr = ip_hdr(skb)->saddr; | ||
1290 | __u32 isn = TCP_SKB_CB(skb)->when; | ||
1291 | bool want_cookie = false, fastopen; | ||
1292 | struct flowi4 fl4; | ||
1293 | struct tcp_fastopen_cookie foc = { .len = -1 }; | ||
1294 | const struct tcp_request_sock_ops *af_ops; | ||
1295 | int err; | ||
1296 | |||
1297 | /* Never answer to SYNs sent to broadcast or multicast */ | 1285 | /* Never answer to SYNs sent to broadcast or multicast */ |
1298 | if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) | 1286 | if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) |
1299 | goto drop; | 1287 | goto drop; |
1300 | 1288 | ||
1301 | /* TW buckets are converted to open requests without | 1289 | return tcp_conn_request(&tcp_request_sock_ops, |
1302 | * limitations, they conserve resources and peer is | 1290 | &tcp_request_sock_ipv4_ops, sk, skb); |
1303 | * evidently real one. | ||
1304 | */ | ||
1305 | if ((sysctl_tcp_syncookies == 2 || | ||
1306 | inet_csk_reqsk_queue_is_full(sk)) && !isn) { | ||
1307 | want_cookie = tcp_syn_flood_action(sk, skb, "TCP"); | ||
1308 | if (!want_cookie) | ||
1309 | goto drop; | ||
1310 | } | ||
1311 | |||
1312 | /* Accept backlog is full. If we have already queued enough | ||
1313 | * of warm entries in syn queue, drop request. It is better than | ||
1314 | * clogging syn queue with openreqs with exponentially increasing | ||
1315 | * timeout. | ||
1316 | */ | ||
1317 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { | ||
1318 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); | ||
1319 | goto drop; | ||
1320 | } | ||
1321 | |||
1322 | req = inet_reqsk_alloc(&tcp_request_sock_ops); | ||
1323 | if (!req) | ||
1324 | goto drop; | ||
1325 | |||
1326 | af_ops = tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; | ||
1327 | |||
1328 | tcp_clear_options(&tmp_opt); | ||
1329 | tmp_opt.mss_clamp = af_ops->mss_clamp; | ||
1330 | tmp_opt.user_mss = tp->rx_opt.user_mss; | ||
1331 | tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc); | ||
1332 | |||
1333 | if (want_cookie && !tmp_opt.saw_tstamp) | ||
1334 | tcp_clear_options(&tmp_opt); | ||
1335 | |||
1336 | tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; | ||
1337 | tcp_openreq_init(req, &tmp_opt, skb, sk); | ||
1338 | |||
1339 | af_ops->init_req(req, sk, skb); | ||
1340 | |||
1341 | if (security_inet_conn_request(sk, skb, req)) | ||
1342 | goto drop_and_free; | ||
1343 | 1291 | ||
1344 | if (!want_cookie || tmp_opt.tstamp_ok) | ||
1345 | TCP_ECN_create_request(req, skb, sock_net(sk)); | ||
1346 | |||
1347 | if (want_cookie) { | ||
1348 | isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); | ||
1349 | req->cookie_ts = tmp_opt.tstamp_ok; | ||
1350 | } else if (!isn) { | ||
1351 | /* VJ's idea. We save last timestamp seen | ||
1352 | * from the destination in peer table, when entering | ||
1353 | * state TIME-WAIT, and check against it before | ||
1354 | * accepting new connection request. | ||
1355 | * | ||
1356 | * If "isn" is not zero, this request hit alive | ||
1357 | * timewait bucket, so that all the necessary checks | ||
1358 | * are made in the function processing timewait state. | ||
1359 | */ | ||
1360 | if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) { | ||
1361 | bool strict; | ||
1362 | |||
1363 | dst = af_ops->route_req(sk, (struct flowi *)&fl4, req, | ||
1364 | &strict); | ||
1365 | if (dst && strict && | ||
1366 | !tcp_peer_is_proven(req, dst, true)) { | ||
1367 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); | ||
1368 | goto drop_and_release; | ||
1369 | } | ||
1370 | } | ||
1371 | /* Kill the following clause, if you dislike this way. */ | ||
1372 | else if (!sysctl_tcp_syncookies && | ||
1373 | (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < | ||
1374 | (sysctl_max_syn_backlog >> 2)) && | ||
1375 | !tcp_peer_is_proven(req, dst, false)) { | ||
1376 | /* Without syncookies last quarter of | ||
1377 | * backlog is filled with destinations, | ||
1378 | * proven to be alive. | ||
1379 | * It means that we continue to communicate | ||
1380 | * to destinations, already remembered | ||
1381 | * to the moment of synflood. | ||
1382 | */ | ||
1383 | LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"), | ||
1384 | &saddr, ntohs(tcp_hdr(skb)->source)); | ||
1385 | goto drop_and_release; | ||
1386 | } | ||
1387 | |||
1388 | isn = af_ops->init_seq(skb); | ||
1389 | } | ||
1390 | if (!dst) { | ||
1391 | dst = af_ops->route_req(sk, (struct flowi *)&fl4, req, NULL); | ||
1392 | if (!dst) | ||
1393 | goto drop_and_free; | ||
1394 | } | ||
1395 | |||
1396 | tcp_rsk(req)->snt_isn = isn; | ||
1397 | tcp_openreq_init_rwin(req, sk, dst); | ||
1398 | fastopen = !want_cookie && | ||
1399 | tcp_try_fastopen(sk, skb, req, &foc, dst); | ||
1400 | err = af_ops->send_synack(sk, dst, NULL, req, | ||
1401 | skb_get_queue_mapping(skb), &foc); | ||
1402 | if (!fastopen) { | ||
1403 | if (err || want_cookie) | ||
1404 | goto drop_and_free; | ||
1405 | |||
1406 | tcp_rsk(req)->listener = NULL; | ||
1407 | af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT); | ||
1408 | } | ||
1409 | |||
1410 | return 0; | ||
1411 | |||
1412 | drop_and_release: | ||
1413 | dst_release(dst); | ||
1414 | drop_and_free: | ||
1415 | reqsk_free(req); | ||
1416 | drop: | 1292 | drop: |
1417 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); | 1293 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); |
1418 | return 0; | 1294 | return 0; |