author     Trond Myklebust <trond.myklebust@primarydata.com>  2015-02-18 10:28:37 -0500
committer  Trond Myklebust <trond.myklebust@primarydata.com>  2015-02-18 10:28:37 -0500
commit     65d2918e716afb89359cfa59734d76c1ff8700cb
tree       4685404f96642243d62c3a1a823340913d087090 /net/ipv4/tcp_minisocks.c
parent     bf40e5561fd288a505d5d8d8bf45eef96fe7253d
parent     338d00cfef07d74a072f96821c64b20f98517d72
Merge branch 'cleanups'
Merge cleanups requested by Linus.
* cleanups: (3 commits)
pnfs: Refactor the *_layout_mark_request_commit to use pnfs_layout_mark_request_commit
nfs: Can call nfs_clear_page_commit() instead
nfs: Provide and use helper functions for marking a page as unstable
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')

 -rw-r--r--  net/ipv4/tcp_minisocks.c  66
 1 file changed, 56 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 63d2680b65db..dd11ac7798c6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -58,6 +58,25 @@ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 	return seq == e_win && seq == end_seq;
 }
 
+static enum tcp_tw_status
+tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
+				  const struct sk_buff *skb, int mib_idx)
+{
+	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+
+	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
+				  &tcptw->tw_last_oow_ack_time)) {
+		/* Send ACK. Note, we do not put the bucket,
+		 * it will be released by caller.
+		 */
+		return TCP_TW_ACK;
+	}
+
+	/* We are rate-limiting, so just release the tw sock and drop skb. */
+	inet_twsk_put(tw);
+	return TCP_TW_SUCCESS;
+}
+
 /*
  * * Main purpose of TIME-WAIT state is to close connection gracefully,
  *   when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
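[Editor's note] Both new call sites funnel through tcp_oow_rate_limited(), which this series adds in net/ipv4/tcp_input.c; its body is not part of this file's diff. A sketch of the intended logic, assuming the shape of that companion patch (a per-socket timestamp of the last out-of-window ACK, gated by a new tcp_invalid_ratelimit sysctl):

	/* Sketch (not part of this diff): rate-limit duplicate ACKs sent in
	 * response to out-of-window packets, so two endpoints stuck in an
	 * ACK loop back off instead of ACKing each other at line rate.
	 */
	bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
				  int mib_idx, u32 *last_oow_ack_time)
	{
		/* Only pure ACKs and SYNs can plausibly be part of an ACK loop. */
		if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
		    !tcp_hdr(skb)->syn)
			goto not_rate_limited;

		if (*last_oow_ack_time) {
			s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);

			if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
				NET_INC_STATS_BH(net, mib_idx);	/* count the skipped ACK */
				return true;	/* suppress this ACK */
			}
		}

		*last_oow_ack_time = tcp_time_stamp;

	not_rate_limited:
		return false;	/* OK to send the dupack now */
	}

The mib_idx argument lets each call site bump its own LINUX_MIB_TCPACKSKIPPED* counter, so the FINWAIT2, TIMEWAIT and SYNRECV cases below stay distinguishable in SNMP/nstat output.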
@@ -116,7 +135,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
 				   tcptw->tw_rcv_nxt,
 				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
-			return TCP_TW_ACK;
+			return tcp_timewait_check_oow_rate_limit(
+				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
 
 		if (th->rst)
 			goto kill;
@@ -250,10 +270,8 @@ kill:
 		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
 				   TCP_TIMEWAIT_LEN);
 
-		/* Send ACK. Note, we do not put the bucket,
-		 * it will be released by caller.
-		 */
-		return TCP_TW_ACK;
+		return tcp_timewait_check_oow_rate_limit(
+			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
 	}
 	inet_twsk_put(tw);
 	return TCP_TW_SUCCESS;
@@ -289,6 +307,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
 		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
 		tcptw->tw_ts_offset	= tp->tsoffset;
+		tcptw->tw_last_oow_ack_time = 0;
 
 #if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == PF_INET6) {
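[Editor's note] The tw_last_oow_ack_time zeroed here is a field the same patch adds to struct tcp_timewait_sock in include/linux/tcp.h, outside this file's diff; presumably along these lines:

	/* Sketch of the companion include/linux/tcp.h change (not shown in
	 * this diff): a timestamp slot for the TIME-WAIT rate limiter.
	 */
	struct tcp_timewait_sock {
		struct inet_timewait_sock tw_sk;
		u32			  tw_rcv_nxt;
		u32			  tw_snd_nxt;
		u32			  tw_rcv_wnd;
		u32			  tw_ts_offset;
		u32			  tw_ts_recent;

		/* The time we sent the last out-of-window ACK: */
		u32			  tw_last_oow_ack_time;

		long			  tw_ts_recent_stamp;
	#ifdef CONFIG_TCP_MD5SIG
		struct tcp_md5sig_key	  *tw_md5_key;
	#endif
	};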
@@ -399,6 +418,32 @@ static void tcp_ecn_openreq_child(struct tcp_sock *tp,
 	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
 }
 
+void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
+	bool ca_got_dst = false;
+
+	if (ca_key != TCP_CA_UNSPEC) {
+		const struct tcp_congestion_ops *ca;
+
+		rcu_read_lock();
+		ca = tcp_ca_find_key(ca_key);
+		if (likely(ca && try_module_get(ca->owner))) {
+			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
+			icsk->icsk_ca_ops = ca;
+			ca_got_dst = true;
+		}
+		rcu_read_unlock();
+	}
+
+	if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+		tcp_assign_congestion_control(sk);
+
+	tcp_set_ca_state(sk, TCP_CA_Open);
+}
+EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
+
 /* This is not only more efficient than what we used to do, it eliminates
  * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
  *
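[Editor's note] tcp_ca_openreq_child() takes over the congestion-control setup that a later hunk removes from tcp_create_openreq_child(). The move buys one thing: access to the dst_entry, so a per-route congestion-control metric (RTAX_CC_ALGO) can override the listener's algorithm; tcp_create_openreq_child() never sees the route. If the route names no algorithm, or its module is gone, and the listener's ops cannot be pinned with try_module_get(), the child falls back to the system default via tcp_assign_congestion_control(). The GPL export suggests the call sites live in the address-family syn_recv_sock handlers; a hypothetical call site (the actual callers would be in tcp_ipv4.c/tcp_ipv6.c, not shown in this diff):

	/* Hypothetical call site in tcp_v4_syn_recv_sock() (not part of this
	 * diff): the child is created first, then its congestion control is
	 * picked while the route is still at hand.
	 */
	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	tcp_ca_openreq_child(newsk, dst);	/* replaces the block removed below */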
@@ -441,6 +486,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		tcp_enable_early_retrans(newtp);
 		newtp->tlp_high_seq = 0;
 		newtp->lsndtime = treq->snt_synack;
+		newtp->last_oow_ack_time = 0;
 		newtp->total_retrans = req->num_retrans;
 
 		/* So many TCP implementations out there (incorrectly) count the
@@ -451,10 +497,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->snd_cwnd = TCP_INIT_CWND;
 		newtp->snd_cwnd_cnt = 0;
 
-		if (!try_module_get(newicsk->icsk_ca_ops->owner))
-			tcp_assign_congestion_control(newsk);
-
-		tcp_set_ca_state(newsk, TCP_CA_Open);
 		tcp_init_xmit_timers(newsk);
 		__skb_queue_head_init(&newtp->out_of_order_queue);
 		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
@@ -583,7 +625,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 		 * Reset timer after retransmitting SYNACK, similar to
 		 * the idea of fast retransmit in recovery.
 		 */
-		if (!inet_rtx_syn_ack(sk, req))
+		if (!tcp_oow_rate_limited(sock_net(sk), skb,
+					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
+					  &tcp_rsk(req)->last_oow_ack_time) &&
+
+		    !inet_rtx_syn_ack(sk, req))
 			req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
 					   TCP_RTO_MAX) + jiffies;
 		return NULL;
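[Editor's note] The "released by caller" comment in the new helper refers to the do_time_wait handling in the receive path; roughly, for IPv4 (tcp_v4_rcv() in net/ipv4/tcp_ipv4.c, untouched by this diff):

	/* Sketch of the consumer of the tcp_tw_status values (not part of
	 * this diff).  TCP_TW_ACK still holds a reference on the tw bucket;
	 * TCP_TW_SUCCESS means the helper already dropped it.
	 */
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
		/* ... try to match a listener and restart the connection,
		 * falling through to ACK otherwise ...
		 */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);	/* send the ACK, then put the bucket */
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;				/* rate-limited: nothing to send */
	}
	goto discard_it;

So when the rate limiter fires, the skb is dropped without generating any segment, which is exactly what breaks the mutual ACK storm the LINUX_MIB_TCPACKSKIPPED* counters measure.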