Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 998f6416ef8b..602e7057e438 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -599,7 +599,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
    for TCP options, but includes only bare TCP header.
 
    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
-   It is minumum of user_mss and mss received with SYN.
+   It is minimum of user_mss and mss received with SYN.
    It also does not include TCP options.
 
    tp->pmtu_cookie is last pmtu, seen by this function.
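
The comment in this first hunk documents how the MSS clamp is derived: it is the minimum of the user-supplied TCP_MAXSEG value and the MSS option received with the peer's SYN, with TCP options excluded from both. A minimal standalone sketch of that rule, assuming a zero user_mss means "unset"; the helper name and numbers are illustrative, not kernel code:

```c
#include <stdio.h>

/* Illustrative only: mirrors the rule described in the comment above.
 * user_mss stands in for the value set via setsockopt(TCP_MAXSEG);
 * syn_mss for the MSS option received with the peer's SYN.
 * Neither value includes TCP options. */
static unsigned int effective_mss_clamp(unsigned int user_mss,
                                        unsigned int syn_mss)
{
	/* Assume 0 means "unset" for user_mss, so fall back to the SYN value. */
	if (user_mss && user_mss < syn_mss)
		return user_mss;
	return syn_mss;
}

int main(void)
{
	/* e.g. the user asked for 1200 bytes, the peer advertised 1460 */
	printf("mss_clamp = %u\n", effective_mss_clamp(1200, 1460)); /* 1200 */
	return 0;
}
```
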
@@ -1171,7 +1171,7 @@ u32 __tcp_select_window(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	/* MSS for the peer's data. Previous verions used mss_clamp
+	/* MSS for the peer's data. Previous versions used mss_clamp
 	 * here. I don't know if the value based on our guesses
 	 * of peer's MSS is better for the performance. It's more correct
 	 * but may be worse for the performance because of rcv_mss
@@ -1361,7 +1361,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	int err;
 
 	/* Do not sent more than we queued. 1/4 is reserved for possible
-	 * copying overhead: frgagmentation, tunneling, mangling etc.
+	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
 	if (atomic_read(&sk->sk_wmem_alloc) >
 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
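
The condition in this last hunk caps write-memory allocation at the queued byte count plus a quarter of it (the `>> 2`), clamped to the socket send buffer; the reserved quarter absorbs the copying overhead the comment names (fragmentation, tunneling, mangling). A small userspace sketch of the same arithmetic, with illustrative names standing in for the sk fields:

```c
#include <stdio.h>

/* Illustrative only: reproduces the limit computed in the check above.
 * wmem_queued and sndbuf stand in for sk->sk_wmem_queued and sk->sk_sndbuf. */
static unsigned int retransmit_limit(unsigned int wmem_queued,
                                     unsigned int sndbuf)
{
	/* queued + queued/4, but never more than the send buffer */
	unsigned int limit = wmem_queued + (wmem_queued >> 2);

	return limit < sndbuf ? limit : sndbuf;
}

int main(void)
{
	/* 16384 queued bytes reserve an extra 4096 for copying overhead */
	printf("%u\n", retransmit_limit(16384, 65536)); /* 20480 */
	printf("%u\n", retransmit_limit(16384, 18000)); /* 18000: sndbuf wins */
	return 0;
}
```

A retransmit is attempted only while sk_wmem_alloc stays below this limit, so the skb being resent is skipped rather than pushing the socket past its accounted memory.
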