Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  76
1 file changed, 37 insertions(+), 39 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7bbbbc33eb4b..8de2f1071c2b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -740,10 +740,10 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
 
 	if (!cwnd) {
-		if (tp->mss_cache_std > 1460)
+		if (tp->mss_cache > 1460)
 			cwnd = 2;
 		else
-			cwnd = (tp->mss_cache_std > 1095) ? 3 : 4;
+			cwnd = (tp->mss_cache > 1095) ? 3 : 4;
 	}
 	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
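
Note: the fallback above is RFC 3390-style initial-window sizing: a larger MSS gets fewer initial segments so the initial window stays near 4380 bytes (3 x 1460), with a floor of 2 and a ceiling of 4 segments. A standalone sketch of the same selection rule, in plain userspace C with hypothetical names (not kernel code):

#include <stdint.h>

/* Model of the tcp_init_cwnd() fallback: derive the initial
 * congestion window (in segments) from the MSS, then clamp it.
 */
static uint32_t init_cwnd_segments(uint32_t mss, uint32_t cwnd_clamp)
{
	uint32_t cwnd;

	if (mss > 1460)
		cwnd = 2;	/* large MSS: 2 segments already exceed 2920 bytes */
	else if (mss > 1095)
		cwnd = 3;	/* 3 x 1460 = 4380 bytes at most */
	else
		cwnd = 4;	/* small MSS: allow 4 segments */

	return cwnd < cwnd_clamp ? cwnd : cwnd_clamp;
}
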
@@ -914,7 +914,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	if (sk->sk_route_caps & NETIF_F_TSO) {
 		sk->sk_route_caps &= ~NETIF_F_TSO;
 		sock_set_flag(sk, SOCK_NO_LARGESEND);
-		tp->mss_cache = tp->mss_cache_std;
+		tp->mss_cache = tp->mss_cache;
 	}
 
 	if (!tp->sacked_out)
@@ -1077,7 +1077,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			    (IsFack(tp) ||
 			     !before(lost_retrans,
 				     TCP_SKB_CB(skb)->ack_seq + tp->reordering *
-				     tp->mss_cache_std))) {
+				     tp->mss_cache))) {
 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 				tp->retrans_out -= tcp_skb_pcount(skb);
 
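Note: this is the lost-retransmit detection in SACK tagging: a segment that was retransmitted is judged lost again when newer data has been SACKed, either immediately under FACK, or once the SACKed sequence (lost_retrans) runs at least tp->reordering MSS-sized segments past the segment's ack_seq. A minimal model of the non-FACK predicate, assuming hypothetical names and the kernel's wrap-safe sequence compare:

#include <stdint.h>

/* Wrap-safe "a before b" on 32-bit sequence numbers, like the
 * kernel's before() macro.
 */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* A retransmission looks lost once the highest SACKed sequence
 * is no longer before ack_seq + reordering * mss, i.e. SACKed
 * data has moved a full reordering window past it.
 */
static int retrans_looks_lost(uint32_t lost_retrans, uint32_t ack_seq,
			      uint32_t reordering, uint32_t mss)
{
	return !seq_before(lost_retrans, ack_seq + reordering * mss);
}
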
@@ -1957,15 +1957,6 @@ static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
 	}
 }
 
-/* There is one downside to this scheme. Although we keep the
- * ACK clock ticking, adjusting packet counters and advancing
- * congestion window, we do not liberate socket send buffer
- * space.
- *
- * Mucking with skb->truesize and sk->sk_wmem_alloc et al.
- * then making a write space wakeup callback is a possible
- * future enhancement. WARNING: it is not trivial to make.
- */
 static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
 			 __u32 now, __s32 *seq_rtt)
 {
@@ -2047,7 +2038,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 		 * the other end.
 		 */
 		if (after(scb->end_seq, tp->snd_una)) {
-			if (tcp_skb_pcount(skb) > 1)
+			if (tcp_skb_pcount(skb) > 1 &&
+			    after(tp->snd_una, scb->seq))
 				acked |= tcp_tso_acked(sk, skb,
 						       now, &seq_rtt);
 			break;
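
Note: the added after(tp->snd_una, scb->seq) guard makes the TSO-trim path fire only on genuine partial ACKs, where the cumulative ACK lands strictly inside the multi-segment frame; previously any unacked TSO skb at the head took the tcp_tso_acked() path. A simplified model of the dispatch test (hypothetical names):

#include <stdint.h>

/* Wrap-safe "a after b" on 32-bit sequence numbers. */
static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

/* Sketch: a TSO skb covering [seq, end_seq) with pcount real
 * segments is partially ACKed only if it is not fully covered
 * by snd_una and the ACK reaches strictly inside it.
 */
static int tso_partially_acked(uint32_t snd_una, uint32_t seq,
			       uint32_t end_seq, int pcount)
{
	return seq_after(end_seq, snd_una) &&	/* not fully acked    */
	       pcount > 1 &&			/* really a TSO frame */
	       seq_after(snd_una, seq);		/* ACK lands inside   */
}
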
@@ -3308,6 +3300,28 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+static inline int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+{
+	/* If the user specified a specific send buffer setting, do
+	 * not modify it.
+	 */
+	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+		return 0;
+
+	/* If we are under global TCP memory pressure, do not expand.  */
+	if (tcp_memory_pressure)
+		return 0;
+
+	/* If we are under soft global TCP memory pressure, do not expand.  */
+	if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+		return 0;
+
+	/* If we filled the congestion window, do not expand.  */
+	if (tp->packets_out >= tp->snd_cwnd)
+		return 0;
+
+	return 1;
+}
+
 /* When incoming ACK allowed to free some skb from write_queue,
  * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket
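
Note: the new helper folds four independent veto conditions into one predicate, so tcp_new_space() below reads as "if expansion is allowed, compute the target". A self-contained model of the same gate, with a hypothetical flattened struct standing in for the kernel's sock/tcp_sock state:

#include <stdint.h>

/* Hypothetical flattened view of the state consulted above. */
struct sndbuf_state {
	int user_locked_sndbuf;	/* SO_SNDBUF set explicitly       */
	int memory_pressure;	/* global TCP memory pressure     */
	long memory_allocated;	/* current global TCP memory      */
	long mem_soft_limit;	/* sysctl_tcp_mem[0] equivalent   */
	uint32_t packets_out;	/* segments currently in flight   */
	uint32_t snd_cwnd;	/* congestion window, in segments */
};

/* Mirror of tcp_should_expand_sndbuf(): any single veto wins. */
static int should_expand_sndbuf(const struct sndbuf_state *s)
{
	if (s->user_locked_sndbuf)
		return 0;
	if (s->memory_pressure)
		return 0;
	if (s->memory_allocated >= s->mem_soft_limit)
		return 0;
	if (s->packets_out >= s->snd_cwnd)
		return 0;
	return 1;
}
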
@@ -3319,11 +3333,8 @@ static void tcp_new_space(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (tp->packets_out < tp->snd_cwnd &&
-	    !(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
-	    !tcp_memory_pressure &&
-	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
-		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache_std) +
+	if (tcp_should_expand_sndbuf(sk, tp)) {
+		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
 			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
 		    demanded = max_t(unsigned int, tp->snd_cwnd,
 				     tp->reordering + 1);
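
Note: sndmem estimates the true memory cost of one queued segment: the worst-case payload (the larger of the peer's advertised mss_clamp and the current mss_cache), plus maximal header room, plus sk_buff bookkeeping; demanded is how many such segments the window may need, never less than reordering + 1. A hedged arithmetic sketch (the constants are illustrative stand-ins, and the rest of tcp_new_space(), outside this hunk, scales and clamps the product):

#include <stdint.h>

#define MODEL_MAX_TCP_HEADER	128	/* stand-in for MAX_TCP_HEADER          */
#define MODEL_SKB_OVERHEAD	192	/* stand-in for sizeof(struct sk_buff) */

/* Per-segment memory cost times the demanded window. */
static unsigned int model_sndbuf_demand(uint32_t mss_clamp, uint32_t mss_cache,
					uint32_t snd_cwnd, uint32_t reordering)
{
	unsigned int sndmem = (mss_clamp > mss_cache ? mss_clamp : mss_cache)
			      + MODEL_MAX_TCP_HEADER + 16 + MODEL_SKB_OVERHEAD;
	unsigned int demanded = snd_cwnd > reordering + 1 ? snd_cwnd
							  : reordering + 1;

	return sndmem * demanded;
}
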
@@ -3346,22 +3357,9 @@ static inline void tcp_check_space(struct sock *sk)
 	}
 }
 
-static void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) ||
-	    tcp_packets_in_flight(tp) >= tp->snd_cwnd ||
-	    tcp_write_xmit(sk, tp->nonagle))
-		tcp_check_probe_timer(sk, tp);
-}
-
-static __inline__ void tcp_data_snd_check(struct sock *sk)
+static __inline__ void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
 {
-	struct sk_buff *skb = sk->sk_send_head;
-
-	if (skb != NULL)
-		__tcp_data_snd_check(sk, skb);
+	tcp_push_pending_frames(sk, tp);
 	tcp_check_space(sk);
 }
 
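Note: with __tcp_data_snd_check() removed, the receiver-window test, the transmit attempt, and the zero-window probe-timer arming are all delegated to tcp_push_pending_frames(), so every ACK-processing path shares one send decision. For reference, a model of the test the removed helper performed (hypothetical names; the equivalent logic lives in tcp_output.c after this patch):

#include <stdint.h>

static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

/* The probe timer must be armed when the head skb does not fit
 * the receiver window, the congestion window is full, or the
 * transmit attempt itself failed.
 */
static int must_arm_probe_timer(uint32_t end_seq, uint32_t snd_una,
				uint32_t snd_wnd, uint32_t in_flight,
				uint32_t snd_cwnd, int xmit_failed)
{
	return seq_after(end_seq, snd_una + snd_wnd) ||
	       in_flight >= snd_cwnd ||
	       xmit_failed;
}
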
@@ -3655,7 +3653,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 */
 			tcp_ack(sk, skb, 0);
 			__kfree_skb(skb);
-			tcp_data_snd_check(sk);
+			tcp_data_snd_check(sk, tp);
 			return 0;
 		} else { /* Header too small */
 			TCP_INC_STATS_BH(TCP_MIB_INERRS);
@@ -3721,7 +3719,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
 				/* Well, only one small jumplet in fast path... */
 				tcp_ack(sk, skb, FLAG_DATA);
-				tcp_data_snd_check(sk);
+				tcp_data_snd_check(sk, tp);
 				if (!tcp_ack_scheduled(tp))
 					goto no_ack;
 			}
@@ -3799,7 +3797,7 @@ step5:
 	/* step 7: process the segment text */
 	tcp_data_queue(sk, skb);
 
-	tcp_data_snd_check(sk);
+	tcp_data_snd_check(sk, tp);
 	tcp_ack_snd_check(sk);
 	return 0;
 
@@ -4109,7 +4107,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		/* Do step6 onward by hand. */
 		tcp_urg(sk, skb, th);
 		__kfree_skb(skb);
-		tcp_data_snd_check(sk);
+		tcp_data_snd_check(sk, tp);
 		return 0;
 	}
 
@@ -4300,7 +4298,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	/* tcp_data could move socket to TIME-WAIT */
 	if (sk->sk_state != TCP_CLOSE) {
-		tcp_data_snd_check(sk);
+		tcp_data_snd_check(sk, tp);
 		tcp_ack_snd_check(sk);
 	}
 