diff options
author | Yuchung Cheng <ycheng@google.com> | 2014-05-11 23:22:09 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-05-13 17:53:02 -0400 |
commit | 5b7ed0892f2af4e60b9a8d2c71c77774512a6cb9 (patch) | |
tree | 39c6fa6e97446aafb67a51de9d6624788b334c2a /net/ipv4/tcp_ipv4.c | |
parent | 4b9734e547aaa947e56480ecf6d509cf9cc307cc (diff) |
tcp: move fastopen functions to tcp_fastopen.c
Move common TFO functions that will be used by both v4 and v6
to tcp_fastopen.c. Create a helper tcp_fastopen_queue_check().
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Daniel Lee <longinus00@gmail.com>
Signed-off-by: Jerry Chu <hkchu@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 185 |
1 file changed, 2 insertions, 183 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ad166dcc278f..032fcaee164a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1260,187 +1260,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { | |||
1260 | }; | 1260 | }; |
1261 | #endif | 1261 | #endif |
1262 | 1262 | ||
1263 | static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb, | ||
1264 | struct request_sock *req, | ||
1265 | struct tcp_fastopen_cookie *foc, | ||
1266 | struct tcp_fastopen_cookie *valid_foc) | ||
1267 | { | ||
1268 | bool skip_cookie = false; | ||
1269 | struct fastopen_queue *fastopenq; | ||
1270 | |||
1271 | if (likely(!fastopen_cookie_present(foc))) { | ||
1272 | /* See include/net/tcp.h for the meaning of these knobs */ | ||
1273 | if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) || | ||
1274 | ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) && | ||
1275 | (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1))) | ||
1276 | skip_cookie = true; /* no cookie to validate */ | ||
1277 | else | ||
1278 | return false; | ||
1279 | } | ||
1280 | fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq; | ||
1281 | /* A FO option is present; bump the counter. */ | ||
1282 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE); | ||
1283 | |||
1284 | /* Make sure the listener has enabled fastopen, and we don't | ||
1285 | * exceed the max # of pending TFO requests allowed before trying | ||
1286 | * to validating the cookie in order to avoid burning CPU cycles | ||
1287 | * unnecessarily. | ||
1288 | * | ||
1289 | * XXX (TFO) - The implication of checking the max_qlen before | ||
1290 | * processing a cookie request is that clients can't differentiate | ||
1291 | * between qlen overflow causing Fast Open to be disabled | ||
1292 | * temporarily vs a server not supporting Fast Open at all. | ||
1293 | */ | ||
1294 | if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 || | ||
1295 | fastopenq == NULL || fastopenq->max_qlen == 0) | ||
1296 | return false; | ||
1297 | |||
1298 | if (fastopenq->qlen >= fastopenq->max_qlen) { | ||
1299 | struct request_sock *req1; | ||
1300 | spin_lock(&fastopenq->lock); | ||
1301 | req1 = fastopenq->rskq_rst_head; | ||
1302 | if ((req1 == NULL) || time_after(req1->expires, jiffies)) { | ||
1303 | spin_unlock(&fastopenq->lock); | ||
1304 | NET_INC_STATS_BH(sock_net(sk), | ||
1305 | LINUX_MIB_TCPFASTOPENLISTENOVERFLOW); | ||
1306 | /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/ | ||
1307 | foc->len = -1; | ||
1308 | return false; | ||
1309 | } | ||
1310 | fastopenq->rskq_rst_head = req1->dl_next; | ||
1311 | fastopenq->qlen--; | ||
1312 | spin_unlock(&fastopenq->lock); | ||
1313 | reqsk_free(req1); | ||
1314 | } | ||
1315 | if (skip_cookie) { | ||
1316 | tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | ||
1317 | return true; | ||
1318 | } | ||
1319 | |||
1320 | if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) { | ||
1321 | if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) { | ||
1322 | tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, | ||
1323 | ip_hdr(skb)->daddr, valid_foc); | ||
1324 | if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) || | ||
1325 | memcmp(&foc->val[0], &valid_foc->val[0], | ||
1326 | TCP_FASTOPEN_COOKIE_SIZE) != 0) | ||
1327 | return false; | ||
1328 | valid_foc->len = -1; | ||
1329 | } | ||
1330 | /* Acknowledge the data received from the peer. */ | ||
1331 | tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | ||
1332 | return true; | ||
1333 | } else if (foc->len == 0) { /* Client requesting a cookie */ | ||
1334 | tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, | ||
1335 | ip_hdr(skb)->daddr, valid_foc); | ||
1336 | NET_INC_STATS_BH(sock_net(sk), | ||
1337 | LINUX_MIB_TCPFASTOPENCOOKIEREQD); | ||
1338 | } else { | ||
1339 | /* Client sent a cookie with wrong size. Treat it | ||
1340 | * the same as invalid and return a valid one. | ||
1341 | */ | ||
1342 | tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, | ||
1343 | ip_hdr(skb)->daddr, valid_foc); | ||
1344 | } | ||
1345 | return false; | ||
1346 | } | ||
1347 | |||
1348 | static int tcp_v4_conn_req_fastopen(struct sock *sk, | ||
1349 | struct sk_buff *skb, | ||
1350 | struct sk_buff *skb_synack, | ||
1351 | struct request_sock *req) | ||
1352 | { | ||
1353 | struct tcp_sock *tp = tcp_sk(sk); | ||
1354 | struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; | ||
1355 | const struct inet_request_sock *ireq = inet_rsk(req); | ||
1356 | struct sock *child; | ||
1357 | int err; | ||
1358 | |||
1359 | req->num_retrans = 0; | ||
1360 | req->num_timeout = 0; | ||
1361 | req->sk = NULL; | ||
1362 | |||
1363 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); | ||
1364 | if (child == NULL) { | ||
1365 | NET_INC_STATS_BH(sock_net(sk), | ||
1366 | LINUX_MIB_TCPFASTOPENPASSIVEFAIL); | ||
1367 | kfree_skb(skb_synack); | ||
1368 | return -1; | ||
1369 | } | ||
1370 | err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr, | ||
1371 | ireq->ir_rmt_addr, ireq->opt); | ||
1372 | err = net_xmit_eval(err); | ||
1373 | if (!err) | ||
1374 | tcp_rsk(req)->snt_synack = tcp_time_stamp; | ||
1375 | /* XXX (TFO) - is it ok to ignore error and continue? */ | ||
1376 | |||
1377 | spin_lock(&queue->fastopenq->lock); | ||
1378 | queue->fastopenq->qlen++; | ||
1379 | spin_unlock(&queue->fastopenq->lock); | ||
1380 | |||
1381 | /* Initialize the child socket. Have to fix some values to take | ||
1382 | * into account the child is a Fast Open socket and is created | ||
1383 | * only out of the bits carried in the SYN packet. | ||
1384 | */ | ||
1385 | tp = tcp_sk(child); | ||
1386 | |||
1387 | tp->fastopen_rsk = req; | ||
1388 | /* Do a hold on the listner sk so that if the listener is being | ||
1389 | * closed, the child that has been accepted can live on and still | ||
1390 | * access listen_lock. | ||
1391 | */ | ||
1392 | sock_hold(sk); | ||
1393 | tcp_rsk(req)->listener = sk; | ||
1394 | |||
1395 | /* RFC1323: The window in SYN & SYN/ACK segments is never | ||
1396 | * scaled. So correct it appropriately. | ||
1397 | */ | ||
1398 | tp->snd_wnd = ntohs(tcp_hdr(skb)->window); | ||
1399 | |||
1400 | /* Activate the retrans timer so that SYNACK can be retransmitted. | ||
1401 | * The request socket is not added to the SYN table of the parent | ||
1402 | * because it's been added to the accept queue directly. | ||
1403 | */ | ||
1404 | inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS, | ||
1405 | TCP_TIMEOUT_INIT, TCP_RTO_MAX); | ||
1406 | |||
1407 | /* Add the child socket directly into the accept queue */ | ||
1408 | inet_csk_reqsk_queue_add(sk, req, child); | ||
1409 | |||
1410 | /* Now finish processing the fastopen child socket. */ | ||
1411 | inet_csk(child)->icsk_af_ops->rebuild_header(child); | ||
1412 | tcp_init_congestion_control(child); | ||
1413 | tcp_mtup_init(child); | ||
1414 | tcp_init_metrics(child); | ||
1415 | tcp_init_buffer_space(child); | ||
1416 | |||
1417 | /* Queue the data carried in the SYN packet. We need to first | ||
1418 | * bump skb's refcnt because the caller will attempt to free it. | ||
1419 | * | ||
1420 | * XXX (TFO) - we honor a zero-payload TFO request for now. | ||
1421 | * (Any reason not to?) | ||
1422 | */ | ||
1423 | if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) { | ||
1424 | /* Don't queue the skb if there is no payload in SYN. | ||
1425 | * XXX (TFO) - How about SYN+FIN? | ||
1426 | */ | ||
1427 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | ||
1428 | } else { | ||
1429 | skb = skb_get(skb); | ||
1430 | skb_dst_drop(skb); | ||
1431 | __skb_pull(skb, tcp_hdr(skb)->doff * 4); | ||
1432 | skb_set_owner_r(skb, child); | ||
1433 | __skb_queue_tail(&child->sk_receive_queue, skb); | ||
1434 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | ||
1435 | tp->syn_data_acked = 1; | ||
1436 | } | ||
1437 | sk->sk_data_ready(sk); | ||
1438 | bh_unlock_sock(child); | ||
1439 | sock_put(child); | ||
1440 | WARN_ON(req->sk == NULL); | ||
1441 | return 0; | ||
1442 | } | ||
1443 | |||
1444 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | 1263 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) |
1445 | { | 1264 | { |
1446 | struct tcp_options_received tmp_opt; | 1265 | struct tcp_options_received tmp_opt; |
@@ -1599,8 +1418,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1599 | if (fastopen_cookie_present(&foc) && foc.len != 0) | 1418 | if (fastopen_cookie_present(&foc) && foc.len != 0) |
1600 | NET_INC_STATS_BH(sock_net(sk), | 1419 | NET_INC_STATS_BH(sock_net(sk), |
1601 | LINUX_MIB_TCPFASTOPENPASSIVEFAIL); | 1420 | LINUX_MIB_TCPFASTOPENPASSIVEFAIL); |
1602 | } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req)) | 1421 | } else if (tcp_fastopen_create_child(sk, skb, skb_synack, req)) |
1603 | goto drop_and_free; | 1422 | goto drop_and_release; |
1604 | 1423 | ||
1605 | return 0; | 1424 | return 0; |
1606 | 1425 | ||