aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c40
1 files changed, 20 insertions, 20 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 827cd4b9e867..34cfa58eab76 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -42,7 +42,7 @@
42 * Andi Kleen : Moved open_request checking here 42 * Andi Kleen : Moved open_request checking here
43 * and process RSTs for open_requests. 43 * and process RSTs for open_requests.
44 * Andi Kleen : Better prune_queue, and other fixes. 44 * Andi Kleen : Better prune_queue, and other fixes.
45 * Andrey Savochkin: Fix RTT measurements in the presnce of 45 * Andrey Savochkin: Fix RTT measurements in the presence of
46 * timestamps. 46 * timestamps.
47 * Andrey Savochkin: Check sequence numbers correctly when 47 * Andrey Savochkin: Check sequence numbers correctly when
48 * removing SACKs due to in sequence incoming 48 * removing SACKs due to in sequence incoming
@@ -224,7 +224,7 @@ static void tcp_fixup_sndbuf(struct sock *sk)
224 * of receiver window. Check #2. 224 * of receiver window. Check #2.
225 * 225 *
226 * The scheme does not work when sender sends good segments opening 226 * The scheme does not work when sender sends good segments opening
227 * window and then starts to feed us spagetti. But it should work 227 * window and then starts to feed us spaghetti. But it should work
228 * in common situations. Otherwise, we have to rely on queue collapsing. 228 * in common situations. Otherwise, we have to rely on queue collapsing.
229 */ 229 */
230 230
@@ -278,7 +278,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
278 int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff); 278 int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
279 279
280 /* Try to select rcvbuf so that 4 mss-sized segments 280 /* Try to select rcvbuf so that 4 mss-sized segments
281 * will fit to window and correspoding skbs will fit to our rcvbuf. 281 * will fit to window and corresponding skbs will fit to our rcvbuf.
282 * (was 3; 4 is minimum to allow fast retransmit to work.) 282 * (was 3; 4 is minimum to allow fast retransmit to work.)
283 */ 283 */
284 while (tcp_win_from_space(rcvmem) < tp->advmss) 284 while (tcp_win_from_space(rcvmem) < tp->advmss)
@@ -287,7 +287,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
287 sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]); 287 sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
288} 288}
289 289
290/* 4. Try to fixup all. It is made iimediately after connection enters 290/* 4. Try to fixup all. It is made immediately after connection enters
291 * established state. 291 * established state.
292 */ 292 */
293static void tcp_init_buffer_space(struct sock *sk) 293static void tcp_init_buffer_space(struct sock *sk)
@@ -367,8 +367,8 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
367 * are stalled on filesystem I/O. 367 * are stalled on filesystem I/O.
368 * 368 *
369 * Also, since we are only going for a minimum in the 369 * Also, since we are only going for a minimum in the
370	 * non-timestamp case, we do not smoothe things out	370	 * non-timestamp case, we do not smooth things out
371 * else with timestamps disabled convergance takes too 371 * else with timestamps disabled convergence takes too
372 * long. 372 * long.
373 */ 373 */
374 if (!win_dep) { 374 if (!win_dep) {
@@ -377,7 +377,7 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
377 } else if (m < new_sample) 377 } else if (m < new_sample)
378 new_sample = m << 3; 378 new_sample = m << 3;
379 } else { 379 } else {
380 /* No previous mesaure. */ 380 /* No previous measure. */
381 new_sample = m << 3; 381 new_sample = m << 3;
382 } 382 }
383 383
@@ -506,7 +506,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
506 if (icsk->icsk_ack.ato > icsk->icsk_rto) 506 if (icsk->icsk_ack.ato > icsk->icsk_rto)
507 icsk->icsk_ack.ato = icsk->icsk_rto; 507 icsk->icsk_ack.ato = icsk->icsk_rto;
508 } else if (m > icsk->icsk_rto) { 508 } else if (m > icsk->icsk_rto) {
509 /* Too long gap. Apparently sender falled to 509 /* Too long gap. Apparently sender failed to
510 * restart window, so that we send ACKs quickly. 510 * restart window, so that we send ACKs quickly.
511 */ 511 */
512 tcp_incr_quickack(sk); 512 tcp_incr_quickack(sk);
@@ -546,7 +546,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
546 * 546 *
547 * Funny. This algorithm seems to be very broken. 547 * Funny. This algorithm seems to be very broken.
548 * These formulae increase RTO, when it should be decreased, increase 548 * These formulae increase RTO, when it should be decreased, increase
549	 * too slowly, when it should be incresed fastly, decrease too fastly	549	 * too slowly, when it should be increased quickly, decrease too quickly
550 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely 550 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
551 * does not matter how to _calculate_ it. Seems, it was trap 551 * does not matter how to _calculate_ it. Seems, it was trap
552 * that VJ failed to avoid. 8) 552 * that VJ failed to avoid. 8)
@@ -607,14 +607,14 @@ static inline void tcp_set_rto(struct sock *sk)
607 * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ 607 * at least by solaris and freebsd. "Erratic ACKs" has _nothing_
608 * to do with delayed acks, because at cwnd>2 true delack timeout 608 * to do with delayed acks, because at cwnd>2 true delack timeout
609 * is invisible. Actually, Linux-2.4 also generates erratic 609 * is invisible. Actually, Linux-2.4 also generates erratic
610 * ACKs in some curcumstances. 610 * ACKs in some circumstances.
611 */ 611 */
612 inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar; 612 inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
613 613
614 /* 2. Fixups made earlier cannot be right. 614 /* 2. Fixups made earlier cannot be right.
615 * If we do not estimate RTO correctly without them, 615 * If we do not estimate RTO correctly without them,
616 * all the algo is pure shit and should be replaced 616 * all the algo is pure shit and should be replaced
617	 * with correct one. It is exaclty, which we pretend to do.	617	 * with correct one. It is exactly what we pretend to do.
618 */ 618 */
619} 619}
620 620
@@ -772,7 +772,7 @@ static void tcp_init_metrics(struct sock *sk)
772 * to make it more realistic. 772 * to make it more realistic.
773 * 773 *
774 * A bit of theory. RTT is time passed after "normal" sized packet 774 * A bit of theory. RTT is time passed after "normal" sized packet
775 * is sent until it is ACKed. In normal curcumstances sending small 775 * is sent until it is ACKed. In normal circumstances sending small
776 * packets force peer to delay ACKs and calculation is correct too. 776 * packets force peer to delay ACKs and calculation is correct too.
777 * The algorithm is adaptive and, provided we follow specs, it 777 * The algorithm is adaptive and, provided we follow specs, it
778 * NEVER underestimate RTT. BUT! If peer tries to make some clever 778 * NEVER underestimate RTT. BUT! If peer tries to make some clever
@@ -1899,7 +1899,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1899} 1899}
1900 1900
1901/* Read draft-ietf-tcplw-high-performance before mucking 1901/* Read draft-ietf-tcplw-high-performance before mucking
1902 * with this code. (Superceeds RFC1323) 1902 * with this code. (Supersedes RFC1323)
1903 */ 1903 */
1904static void tcp_ack_saw_tstamp(struct sock *sk, int flag) 1904static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
1905{ 1905{
@@ -1912,7 +1912,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
1912 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru> 1912 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
1913 * 1913 *
1914 * Changed: reset backoff as soon as we see the first valid sample. 1914 * Changed: reset backoff as soon as we see the first valid sample.
1915 * If we do not, we get strongly overstimated rto. With timestamps 1915 * If we do not, we get strongly overestimated rto. With timestamps
1916 * samples are accepted even from very old segments: f.e., when rtt=1 1916 * samples are accepted even from very old segments: f.e., when rtt=1
1917 * increases to 8, we retransmit 5 times and after 8 seconds delayed 1917 * increases to 8, we retransmit 5 times and after 8 seconds delayed
1918 * answer arrives rto becomes 120 seconds! If at least one of segments 1918 * answer arrives rto becomes 120 seconds! If at least one of segments
@@ -2268,7 +2268,7 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
2268 } 2268 }
2269 2269
2270 /* F-RTO affects on two new ACKs following RTO. 2270 /* F-RTO affects on two new ACKs following RTO.
2271 * At latest on third ACK the TCP behavor is back to normal. 2271 * At latest on third ACK the TCP behavior is back to normal.
2272 */ 2272 */
2273 tp->frto_counter = (tp->frto_counter + 1) % 3; 2273 tp->frto_counter = (tp->frto_counter + 1) % 3;
2274} 2274}
@@ -2344,7 +2344,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2344 tcp_process_frto(sk, prior_snd_una); 2344 tcp_process_frto(sk, prior_snd_una);
2345 2345
2346 if (tcp_ack_is_dubious(sk, flag)) { 2346 if (tcp_ack_is_dubious(sk, flag)) {
2347 /* Advanve CWND, if state allows this. */ 2347 /* Advance CWND, if state allows this. */
2348 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag)) 2348 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
2349 tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0); 2349 tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
2350 tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag); 2350 tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
@@ -3133,7 +3133,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
3133{ 3133{
3134 struct sk_buff *skb; 3134 struct sk_buff *skb;
3135 3135
3136 /* First, check that queue is collapsable and find 3136 /* First, check that queue is collapsible and find
3137 * the point where collapsing can be useful. */ 3137 * the point where collapsing can be useful. */
3138 for (skb = head; skb != tail; ) { 3138 for (skb = head; skb != tail; ) {
3139 /* No new bits? It is possible on ofo queue. */ 3139 /* No new bits? It is possible on ofo queue. */
@@ -3441,7 +3441,7 @@ static __inline__ void tcp_ack_snd_check(struct sock *sk)
3441 3441
3442/* 3442/*
3443 * This routine is only called when we have urgent data 3443 * This routine is only called when we have urgent data
3444 * signalled. Its the 'slow' part of tcp_urg. It could be 3444 * signaled. Its the 'slow' part of tcp_urg. It could be
3445 * moved inline now as tcp_urg is only called from one 3445 * moved inline now as tcp_urg is only called from one
3446 * place. We handle URGent data wrong. We have to - as 3446 * place. We handle URGent data wrong. We have to - as
3447 * BSD still doesn't use the correction from RFC961. 3447 * BSD still doesn't use the correction from RFC961.
@@ -3486,7 +3486,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3486 * urgent. To do this requires some care. We cannot just ignore 3486 * urgent. To do this requires some care. We cannot just ignore
3487 * tp->copied_seq since we would read the last urgent byte again 3487 * tp->copied_seq since we would read the last urgent byte again
3488 * as data, nor can we alter copied_seq until this data arrives 3488 * as data, nor can we alter copied_seq until this data arrives
3489 * or we break the sematics of SIOCATMARK (and thus sockatmark()) 3489 * or we break the semantics of SIOCATMARK (and thus sockatmark())
3490 * 3490 *
3491 * NOTE. Double Dutch. Rendering to plain English: author of comment 3491 * NOTE. Double Dutch. Rendering to plain English: author of comment
3492 * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); 3492 * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB);
@@ -3631,7 +3631,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3631 tp->rx_opt.saw_tstamp = 0; 3631 tp->rx_opt.saw_tstamp = 0;
3632 3632
3633 /* pred_flags is 0xS?10 << 16 + snd_wnd 3633 /* pred_flags is 0xS?10 << 16 + snd_wnd
3634 * if header_predition is to be made 3634 * if header_prediction is to be made
3635 * 'S' will always be tp->tcp_header_len >> 2 3635 * 'S' will always be tp->tcp_header_len >> 2
3636 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 3636 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to
3637 * turn it off (when there are holes in the receive 3637 * turn it off (when there are holes in the receive