Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 672854664ff5..7820f3a7dd70 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1875,8 +1875,12 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	 * - better RTT estimation and ACK scheduling
 	 * - faster recovery
 	 * - high rates
+	 * Alas, some drivers / subsystems require a fair amount
+	 * of queued bytes to ensure line rate.
+	 * One example is wifi aggregation (802.11 AMPDU)
 	 */
-	limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
+	limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
+		      sk->sk_pacing_rate >> 10);
 
 	if (atomic_read(&sk->sk_wmem_alloc) > limit) {
 		set_bit(TSQ_THROTTLED, &tp->tsq_flags);
@@ -3093,7 +3097,6 @@ void tcp_send_window_probe(struct sock *sk)
 {
 	if (sk->sk_state == TCP_ESTABLISHED) {
 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
-		tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
 		tcp_xmit_probe_skb(sk, 0);
 	}
 }
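
For illustration, the changed line makes the TSQ limit the larger of the tcp_limit_output_bytes sysctl and sk_pacing_rate >> 10, i.e. roughly one millisecond of data at the socket's pacing rate (bytes per second divided by 1024). The standalone userspace sketch below mirrors that arithmetic only; the tsq_limit() name and the 131072-byte default are assumptions for illustration, not kernel code.

/*
 * Userspace sketch of the limit computation in the hunk above.
 * Assumptions: tsq_limit() is a made-up helper name, and 131072 is
 * assumed as the tcp_limit_output_bytes default; the kernel reads the
 * real value from /proc/sys/net/ipv4/tcp_limit_output_bytes.
 */
#include <stdio.h>

static unsigned int sysctl_tcp_limit_output_bytes = 131072; /* assumed default */

/* Mirrors: limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
 *                        sk->sk_pacing_rate >> 10);
 * pacing_rate is in bytes per second, so pacing_rate >> 10 is roughly
 * one millisecond worth of data (division by 1024 instead of 1000).
 */
static unsigned int tsq_limit(unsigned int pacing_rate)
{
	unsigned int dynamic = pacing_rate >> 10;

	return dynamic > sysctl_tcp_limit_output_bytes ?
	       dynamic : sysctl_tcp_limit_output_bytes;
}

int main(void)
{
	/* ~10 Mbit/s (1.25 MB/s): the sysctl floor dominates, keeping
	 * enough queued bytes for e.g. wifi aggregation. */
	printf("limit at 10 Mbit/s: %u bytes\n", tsq_limit(1250000U));

	/* ~10 Gbit/s (1.25 GB/s): about one millisecond of data wins. */
	printf("limit at 10 Gbit/s: %u bytes\n", tsq_limit(1250000000U));
	return 0;
}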