about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  55
1 file changed, 23 insertions, 32 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b55f60f6fcbe..ee0df4817498 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -182,7 +182,7 @@ static void tcp_incr_quickack(struct sock *sk)
182 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); 182 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
183} 183}
184 184
185void tcp_enter_quickack_mode(struct sock *sk) 185static void tcp_enter_quickack_mode(struct sock *sk)
186{ 186{
187 struct inet_connection_sock *icsk = inet_csk(sk); 187 struct inet_connection_sock *icsk = inet_csk(sk);
188 tcp_incr_quickack(sk); 188 tcp_incr_quickack(sk);
@@ -805,25 +805,12 @@ void tcp_update_metrics(struct sock *sk)
805 } 805 }
806} 806}
807 807
808/* Numbers are taken from RFC3390.
809 *
810 * John Heffner states:
811 *
812 * The RFC specifies a window of no more than 4380 bytes
813 * unless 2*MSS > 4380. Reading the pseudocode in the RFC
814 * is a bit misleading because they use a clamp at 4380 bytes
815 * rather than use a multiplier in the relevant range.
816 */
817__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst) 808__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
818{ 809{
819 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); 810 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
820 811
821 if (!cwnd) { 812 if (!cwnd)
822 if (tp->mss_cache > 1460) 813 cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
823 cwnd = 2;
824 else
825 cwnd = (tp->mss_cache > 1095) ? 3 : 4;
826 }
827 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); 814 return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
828} 815}
829 816
@@ -2314,7 +2301,7 @@ static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
2314 2301
2315static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) 2302static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
2316{ 2303{
2317 return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); 2304 return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
2318} 2305}
2319 2306
2320static inline int tcp_head_timedout(struct sock *sk) 2307static inline int tcp_head_timedout(struct sock *sk)
@@ -2508,7 +2495,7 @@ static void tcp_timeout_skbs(struct sock *sk)
2508/* Mark head of queue up as lost. With RFC3517 SACK, the packets is 2495/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
2509 * is against sacked "cnt", otherwise it's against facked "cnt" 2496 * is against sacked "cnt", otherwise it's against facked "cnt"
2510 */ 2497 */
2511static void tcp_mark_head_lost(struct sock *sk, int packets) 2498static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2512{ 2499{
2513 struct tcp_sock *tp = tcp_sk(sk); 2500 struct tcp_sock *tp = tcp_sk(sk);
2514 struct sk_buff *skb; 2501 struct sk_buff *skb;
@@ -2516,13 +2503,13 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2516 int err; 2503 int err;
2517 unsigned int mss; 2504 unsigned int mss;
2518 2505
2519 if (packets == 0)
2520 return;
2521
2522 WARN_ON(packets > tp->packets_out); 2506 WARN_ON(packets > tp->packets_out);
2523 if (tp->lost_skb_hint) { 2507 if (tp->lost_skb_hint) {
2524 skb = tp->lost_skb_hint; 2508 skb = tp->lost_skb_hint;
2525 cnt = tp->lost_cnt_hint; 2509 cnt = tp->lost_cnt_hint;
2510 /* Head already handled? */
2511 if (mark_head && skb != tcp_write_queue_head(sk))
2512 return;
2526 } else { 2513 } else {
2527 skb = tcp_write_queue_head(sk); 2514 skb = tcp_write_queue_head(sk);
2528 cnt = 0; 2515 cnt = 0;
@@ -2557,6 +2544,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2557 } 2544 }
2558 2545
2559 tcp_skb_mark_lost(tp, skb); 2546 tcp_skb_mark_lost(tp, skb);
2547
2548 if (mark_head)
2549 break;
2560 } 2550 }
2561 tcp_verify_left_out(tp); 2551 tcp_verify_left_out(tp);
2562} 2552}
@@ -2568,17 +2558,18 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2568 struct tcp_sock *tp = tcp_sk(sk); 2558 struct tcp_sock *tp = tcp_sk(sk);
2569 2559
2570 if (tcp_is_reno(tp)) { 2560 if (tcp_is_reno(tp)) {
2571 tcp_mark_head_lost(sk, 1); 2561 tcp_mark_head_lost(sk, 1, 1);
2572 } else if (tcp_is_fack(tp)) { 2562 } else if (tcp_is_fack(tp)) {
2573 int lost = tp->fackets_out - tp->reordering; 2563 int lost = tp->fackets_out - tp->reordering;
2574 if (lost <= 0) 2564 if (lost <= 0)
2575 lost = 1; 2565 lost = 1;
2576 tcp_mark_head_lost(sk, lost); 2566 tcp_mark_head_lost(sk, lost, 0);
2577 } else { 2567 } else {
2578 int sacked_upto = tp->sacked_out - tp->reordering; 2568 int sacked_upto = tp->sacked_out - tp->reordering;
2579 if (sacked_upto < fast_rexmit) 2569 if (sacked_upto >= 0)
2580 sacked_upto = fast_rexmit; 2570 tcp_mark_head_lost(sk, sacked_upto, 0);
2581 tcp_mark_head_lost(sk, sacked_upto); 2571 else if (fast_rexmit)
2572 tcp_mark_head_lost(sk, 1, 1);
2582 } 2573 }
2583 2574
2584 tcp_timeout_skbs(sk); 2575 tcp_timeout_skbs(sk);
@@ -2887,7 +2878,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
2887 icsk->icsk_mtup.probe_size; 2878 icsk->icsk_mtup.probe_size;
2888 tp->snd_cwnd_cnt = 0; 2879 tp->snd_cwnd_cnt = 0;
2889 tp->snd_cwnd_stamp = tcp_time_stamp; 2880 tp->snd_cwnd_stamp = tcp_time_stamp;
2890 tp->rcv_ssthresh = tcp_current_ssthresh(sk); 2881 tp->snd_ssthresh = tcp_current_ssthresh(sk);
2891 2882
2892 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; 2883 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
2893 icsk->icsk_mtup.probe_size = 0; 2884 icsk->icsk_mtup.probe_size = 0;
@@ -2984,7 +2975,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
2984 before(tp->snd_una, tp->high_seq) && 2975 before(tp->snd_una, tp->high_seq) &&
2985 icsk->icsk_ca_state != TCP_CA_Open && 2976 icsk->icsk_ca_state != TCP_CA_Open &&
2986 tp->fackets_out > tp->reordering) { 2977 tp->fackets_out > tp->reordering) {
2987 tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering); 2978 tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
2988 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS); 2979 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
2989 } 2980 }
2990 2981
@@ -3412,8 +3403,8 @@ static void tcp_ack_probe(struct sock *sk)
3412 3403
3413static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag) 3404static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
3414{ 3405{
3415 return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 3406 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3416 inet_csk(sk)->icsk_ca_state != TCP_CA_Open); 3407 inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3417} 3408}
3418 3409
3419static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag) 3410static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
@@ -3430,9 +3421,9 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
3430 const u32 ack, const u32 ack_seq, 3421 const u32 ack, const u32 ack_seq,
3431 const u32 nwin) 3422 const u32 nwin)
3432{ 3423{
3433 return (after(ack, tp->snd_una) || 3424 return after(ack, tp->snd_una) ||
3434 after(ack_seq, tp->snd_wl1) || 3425 after(ack_seq, tp->snd_wl1) ||
3435 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd)); 3426 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
3436} 3427}
3437 3428
3438/* Update our send window. 3429/* Update our send window.