author    Stephen Hemminger <shemminger@osdl.org>    2005-11-10 20:13:47 -0500
committer David S. Miller <davem@davemloft.net>      2005-11-10 20:13:47 -0500
commit    caa20d9abe810be2ede9612b6c9db6ce7d6edf80 (patch)
tree      18d027397fa5ddf9dcfb193ba114f3b8276199e2 /net
parent    326f36e9e7de362e09745ce6f84b65e7ccac33ba (diff)
[TCP]: spelling fixes
Minor spelling fixes for TCP code.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp.c            |  2
-rw-r--r--  net/ipv4/tcp_input.c      | 40
-rw-r--r--  net/ipv4/tcp_ipv4.c       |  4
-rw-r--r--  net/ipv4/tcp_minisocks.c  |  6
-rw-r--r--  net/ipv4/tcp_output.c     |  6
-rw-r--r--  net/ipv4/tcp_timer.c      |  4
6 files changed, 31 insertions, 31 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cfaf76133759..9ac7a4f46bd8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1640,7 +1640,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	} else if (tcp_need_reset(old_state) ||
 		   (tp->snd_nxt != tp->write_seq &&
 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
-		/* The last check adjusts for discrepance of Linux wrt. RFC
+		/* The last check adjusts for discrepancy of Linux wrt. RFC
 		 * states
 		 */
 		tcp_send_active_reset(sk, gfp_any());
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 827cd4b9e867..34cfa58eab76 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -42,7 +42,7 @@
  *		Andi Kleen	:	Moved open_request checking here
  *					and process RSTs for open_requests.
  *		Andi Kleen	:	Better prune_queue, and other fixes.
- *		Andrey Savochkin:	Fix RTT measurements in the presnce of
+ *		Andrey Savochkin:	Fix RTT measurements in the presence of
  *					timestamps.
  *		Andrey Savochkin:	Check sequence numbers correctly when
  *					removing SACKs due to in sequence incoming
@@ -224,7 +224,7 @@ static void tcp_fixup_sndbuf(struct sock *sk)
  * of receiver window. Check #2.
  *
  * The scheme does not work when sender sends good segments opening
- * window and then starts to feed us spagetti. But it should work
+ * window and then starts to feed us spaghetti. But it should work
  * in common situations. Otherwise, we have to rely on queue collapsing.
  */
 
@@ -278,7 +278,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
 	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
 
 	/* Try to select rcvbuf so that 4 mss-sized segments
-	 * will fit to window and correspoding skbs will fit to our rcvbuf.
+	 * will fit to window and corresponding skbs will fit to our rcvbuf.
 	 * (was 3; 4 is minimum to allow fast retransmit to work.)
 	 */
 	while (tcp_win_from_space(rcvmem) < tp->advmss)
@@ -287,7 +287,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
 		sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
 }
 
-/* 4. Try to fixup all. It is made iimediately after connection enters
+/* 4. Try to fixup all. It is made immediately after connection enters
  * established state.
  */
 static void tcp_init_buffer_space(struct sock *sk)
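
For reference, the sizing logic these two hunks touch picks a per-segment buffer quantum large enough that its usable window share covers one advertised MSS, then provisions four such segments. A minimal stand-alone sketch of that arithmetic, assuming the usual tcp_adv_win_scale convention for tcp_win_from_space(); the constants and the harness are hypothetical, not the kernel code:

#include <stdio.h>

static int adv_win_scale = 2;	/* stand-in for sysctl_tcp_adv_win_scale */

/* Fraction of buffer space usable as advertised window: either
 * space >> -scale (scale <= 0) or space minus space >> scale.
 */
static int win_from_space(int space)
{
	return adv_win_scale <= 0 ?
		space >> -adv_win_scale :
		space - (space >> adv_win_scale);
}

int main(void)
{
	int advmss = 1460;		/* advertised MSS */
	int rcvmem = advmss + 384;	/* + rough per-skb overhead, hypothetical */

	/* Grow the quantum until its window share covers one MSS,
	 * mirroring the while loop in tcp_fixup_rcvbuf() above.
	 */
	while (win_from_space(rcvmem) < advmss)
		rcvmem += 128;

	/* Four segments must fit: the minimum for fast retransmit to work. */
	printf("rcvbuf >= %d bytes for 4 x %d-byte segments\n",
	       4 * rcvmem, advmss);
	return 0;
}
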
@@ -367,8 +367,8 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
 	 * are stalled on filesystem I/O.
 	 *
 	 * Also, since we are only going for a minimum in the
-	 * non-timestamp case, we do not smoothe things out
-	 * else with timestamps disabled convergance takes too
+	 * non-timestamp case, we do not smoother things out
+	 * else with timestamps disabled convergence takes too
 	 * long.
 	 */
 	if (!win_dep) {
@@ -377,7 +377,7 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
 		} else if (m < new_sample)
 			new_sample = m << 3;
 	} else {
-		/* No previous mesaure. */
+		/* No previous measure. */
 		new_sample = m << 3;
 	}
 
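
The estimator around this hunk keeps the receive-side RTT left-shifted by 3 bits (eighths), so the smoothed path can divide by 8 without losing precision, while the window-dependent path only tracks a minimum. A condensed, hypothetical model of that idea, not the kernel function itself:

/* rtt_est holds the estimate << 3; zero means no sample yet. */
static unsigned long rtt_est;

static void rcv_rtt_update(long m, int win_dep)
{
	if (m == 0)
		m = 1;			/* never accept a zero sample */

	if (rtt_est != 0) {
		if (!win_dep) {
			/* Smoothed path: est += (m - est)/8 in real units. */
			rtt_est += m - (long)(rtt_est >> 3);
		} else if ((unsigned long)(m << 3) < rtt_est) {
			/* Window-limited samples only lower the estimate. */
			rtt_est = m << 3;
		}
	} else {
		rtt_est = m << 3;	/* no previous measure */
	}
}
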
@@ -506,7 +506,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
 		if (icsk->icsk_ack.ato > icsk->icsk_rto)
 			icsk->icsk_ack.ato = icsk->icsk_rto;
 	} else if (m > icsk->icsk_rto) {
-		/* Too long gap. Apparently sender falled to
+		/* Too long gap. Apparently sender failed to
 		 * restart window, so that we send ACKs quickly.
 		 */
 		tcp_incr_quickack(sk);
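
Context for this hunk: the delayed-ACK timeout (ato) chases the observed inter-arrival gap m but is fenced between a floor and the retransmission timeout; once the gap exceeds the RTO the receiver switches to quick ACKs. A hypothetical condensed model, where ATO_MIN and the signature are stand-ins rather than the kernel API:

#define ATO_MIN	40	/* ms; stand-in for the kernel's delayed-ACK floor */

static void ato_update(unsigned int *ato, unsigned int m,
		       unsigned int rto, int *quickack)
{
	if (m <= ATO_MIN / 2) {
		/* Back-to-back arrivals: decay toward the floor. */
		*ato = (*ato >> 1) + ATO_MIN / 2;
	} else if (m < *ato) {
		/* Moderate gap: blend it in, but never beyond the RTO. */
		*ato = (*ato >> 1) + m;
		if (*ato > rto)
			*ato = rto;
	} else if (m > rto) {
		/* Too long a gap: sender failed to restart the window,
		 * so ACK quickly to reopen it (tcp_incr_quickack above).
		 */
		*quickack = 1;
	}
}
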
@@ -546,7 +546,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 	 *
 	 * Funny. This algorithm seems to be very broken.
 	 * These formulae increase RTO, when it should be decreased, increase
-	 * too slowly, when it should be incresed fastly, decrease too fastly
+	 * too slowly, when it should be increased fastly, decrease too fastly
 	 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
 	 * does not matter how to _calculate_ it. Seems, it was trap
 	 * that VJ failed to avoid. 8)
@@ -607,14 +607,14 @@ static inline void tcp_set_rto(struct sock *sk)
 	 * at least by solaris and freebsd. "Erratic ACKs" has _nothing_
 	 * to do with delayed acks, because at cwnd>2 true delack timeout
 	 * is invisible. Actually, Linux-2.4 also generates erratic
-	 * ACKs in some curcumstances.
+	 * ACKs in some circumstances.
 	 */
 	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
 
 	/* 2. Fixups made earlier cannot be right.
 	 * If we do not estimate RTO correctly without them,
 	 * all the algo is pure shit and should be replaced
-	 * with correct one. It is exaclty, which we pretend to do.
+	 * with correct one. It is exactly, which we pretend to do.
 	 */
 }
 
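
For orientation, the RTO line in the middle of this hunk encodes the Van Jacobson estimator in fixed point: tp->srtt is kept left-shifted by 3 and tp->rttvar already carries the factor of 4 from RFC 2988's RTO = SRTT + 4*RTTVAR (ignoring the clock-granularity term). A hypothetical stand-alone sketch of that arithmetic, with illustrative names rather than the kernel's:

struct rtt_state {
	unsigned long srtt;	/* smoothed RTT << 3 */
	unsigned long rttvar;	/* deviation, pre-multiplied by 4 */
};

static unsigned long rto_from_sample(struct rtt_state *s, long m)
{
	if (s->srtt == 0) {
		/* First sample: SRTT = m, RTTVAR = m/2 (stored as 4 * m/2). */
		s->srtt = m << 3;
		s->rttvar = m << 1;
	} else {
		long err = m - (long)(s->srtt >> 3);

		s->srtt += err;			/* SRTT += (m - SRTT)/8 */
		if (err < 0)
			err = -err;
		/* RTTVAR = 3/4 RTTVAR + 1/4 |err|, in the scaled units. */
		s->rttvar += err - (s->rttvar >> 2);
	}
	/* The line in tcp_set_rto(): RTO = SRTT + 4*RTTVAR. */
	return (s->srtt >> 3) + s->rttvar;
}
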
@@ -772,7 +772,7 @@ static void tcp_init_metrics(struct sock *sk)
 	 * to make it more realistic.
 	 *
 	 * A bit of theory. RTT is time passed after "normal" sized packet
-	 * is sent until it is ACKed. In normal curcumstances sending small
+	 * is sent until it is ACKed. In normal circumstances sending small
 	 * packets force peer to delay ACKs and calculation is correct too.
 	 * The algorithm is adaptive and, provided we follow specs, it
 	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
@@ -1899,7 +1899,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 }
 
 /* Read draft-ietf-tcplw-high-performance before mucking
- * with this code. (Superceeds RFC1323)
+ * with this code. (Supersedes RFC1323)
  */
 static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
 {
@@ -1912,7 +1912,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
 	 * 1998/04/10	Andrey V. Savochkin <saw@msu.ru>
 	 *
 	 * Changed: reset backoff as soon as we see the first valid sample.
-	 * If we do not, we get strongly overstimated rto. With timestamps
+	 * If we do not, we get strongly overestimated rto. With timestamps
 	 * samples are accepted even from very old segments: f.e., when rtt=1
 	 * increases to 8, we retransmit 5 times and after 8 seconds delayed
 	 * answer arrives rto becomes 120 seconds! If at least one of segments
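
The sampling this comment describes is simple: with RFC 1323 timestamps every ACK echoes the timestamp of the segment it acknowledges, so RTT = now minus the echoed value, and the first valid sample can safely clear the exponential backoff. A hypothetical sketch reusing rto_from_sample() from the earlier block:

static void ack_saw_tstamp(unsigned long now, unsigned long echoed_tsecr,
			   struct rtt_state *s, unsigned int *backoff)
{
	long seq_rtt = (long)(now - echoed_tsecr);	/* RTT from the echo */

	rto_from_sample(s, seq_rtt);
	*backoff = 0;	/* reset backoff on the first valid sample */
}
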
@@ -2268,7 +2268,7 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
 	}
 
 	/* F-RTO affects on two new ACKs following RTO.
-	 * At latest on third ACK the TCP behavor is back to normal.
+	 * At latest on third ACK the TCP behavior is back to normal.
 	 */
 	tp->frto_counter = (tp->frto_counter + 1) % 3;
 }
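
The modulo-3 counter is a compact phase machine: state 0 is normal operation, and states 1 and 2 mark the first and second ACK after a retransmission timeout, during which F-RTO judges whether the timeout was spurious. A toy model with hypothetical names:

static unsigned int frto_counter;	/* 0 = normal, 1-2 = post-RTO ACKs */

static void process_frto_ack(void)
{
	/* Advances 0 -> 1 -> 2 -> 0; at latest on the third ACK
	 * behavior is back to normal, as the comment above says.
	 */
	frto_counter = (frto_counter + 1) % 3;
}
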
@@ -2344,7 +2344,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	tcp_process_frto(sk, prior_snd_una);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
-		/* Advanve CWND, if state allows this. */
+		/* Advance CWND, if state allows this. */
 		if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
 		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
@@ -3133,7 +3133,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 {
 	struct sk_buff *skb;
 
-	/* First, check that queue is collapsable and find
+	/* First, check that queue is collapsible and find
 	 * the point where collapsing can be useful. */
 	for (skb = head; skb != tail; ) {
 		/* No new bits? It is possible on ofo queue. */
@@ -3441,7 +3441,7 @@ static __inline__ void tcp_ack_snd_check(struct sock *sk)
 
 /*
  * This routine is only called when we have urgent data
- * signalled. Its the 'slow' part of tcp_urg. It could be
+ * signaled. Its the 'slow' part of tcp_urg. It could be
  * moved inline now as tcp_urg is only called from one
  * place. We handle URGent data wrong. We have to - as
  * BSD still doesn't use the correction from RFC961.
@@ -3486,7 +3486,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
 	 * urgent. To do this requires some care. We cannot just ignore
 	 * tp->copied_seq since we would read the last urgent byte again
 	 * as data, nor can we alter copied_seq until this data arrives
-	 * or we break the sematics of SIOCATMARK (and thus sockatmark())
+	 * or we break the semantics of SIOCATMARK (and thus sockatmark())
 	 *
 	 * NOTE. Double Dutch. Rendering to plain English: author of comment
 	 * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB);
@@ -3631,7 +3631,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	tp->rx_opt.saw_tstamp = 0;
 
 	/* pred_flags is 0xS?10 << 16 + snd_wnd
-	 * if header_predition is to be made
+	 * if header_prediction is to be made
 	 * 'S' will always be tp->tcp_header_len >> 2
 	 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to
 	 * turn it off (when there are holes in the receive
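
The encoding this comment spells out mirrors the fourth 32-bit word of the TCP header (data offset, flags, window), so header prediction can classify an incoming segment with one masked compare. An illustrative construction of that value, in host byte order for readability and with a hypothetical helper name:

#include <stdint.h>

static uint32_t make_pred_flags(unsigned int tcp_header_len /* bytes */,
				uint16_t snd_wnd)
{
	/* 0xS?10 << 16 + snd_wnd: S = tcp_header_len >> 2 (the doff
	 * nibble), '?' = 0 on the fast path, 0x10 = the ACK flag byte.
	 */
	return ((uint32_t)(tcp_header_len >> 2) << 28) |
	       ((uint32_t)0x10 << 16) |
	       snd_wnd;
}

/* e.g. a 32-byte header (timestamps on) and window 5840 yields
 * 0x801016d0, matching an ACK-only segment with doff = 8.
 */
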
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ac1fcf5b4ebc..4d5021e1929b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -39,7 +39,7 @@
  *					request_sock handling and moved
  *					most of it into the af independent code.
  *					Added tail drop and some other bugfixes.
- *					Added new listen sematics.
+ *					Added new listen semantics.
  *		Mike McLagan	:	Routing by source
  *		Juan Jose Ciarlante:	ip_dynaddr bits
  *		Andi Kleen:		various fixes.
@@ -1210,7 +1210,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
 	/* An explanation is required here, I think.
 	 * Packet length and doff are validated by header prediction,
-	 * provided case of th->doff==0 is elimineted.
+	 * provided case of th->doff==0 is eliminated.
 	 * So, we defer the checks. */
 	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
 	     tcp_v4_checksum_init(skb)))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9203a21e299f..1b66a2ac4321 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -158,7 +158,7 @@ kill_with_rst:
 		/* I am shamed, but failed to make it more elegant.
 		 * Yes, it is direct reference to IP, which is impossible
 		 * to generalize to IPv6. Taking into account that IPv6
-		 * do not undertsnad recycling in any case, it not
+		 * do not understand recycling in any case, it not
 		 * a big problem in practice. --ANK */
 		if (tw->tw_family == AF_INET &&
 		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
@@ -194,7 +194,7 @@ kill_with_rst:
 	/* In window segment, it may be only reset or bare ack. */
 
 	if (th->rst) {
-		/* This is TIME_WAIT assasination, in two flavors.
+		/* This is TIME_WAIT assassination, in two flavors.
 		 * Oh well... nobody has a sufficient solution to this
 		 * protocol bug yet.
 		 */
@@ -551,7 +551,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 
 	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
 	 * and the incoming segment acknowledges something not yet
-	 * sent (the segment carries an unaccaptable ACK) ...
+	 * sent (the segment carries an unacceptable ACK) ...
 	 * a reset is sent."
 	 *
 	 * Invalid ACK: reset will be sent by listening socket
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 998f6416ef8b..602e7057e438 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -599,7 +599,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
    for TCP options, but includes only bare TCP header.
 
    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
-   It is minumum of user_mss and mss received with SYN.
+   It is minimum of user_mss and mss received with SYN.
    It also does not include TCP options.
 
    tp->pmtu_cookie is last pmtu, seen by this function.
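
The block comment documents several related MSS quantities; the relation it states reduces to a clamp. A hypothetical sketch assuming fixed 20-byte IP and TCP headers and ignoring options, with illustrative names:

static unsigned int effective_mss(unsigned int pmtu,
				  unsigned int user_mss,
				  unsigned int syn_mss)
{
	/* mss_clamp: minimum of user_mss and the mss received with SYN. */
	unsigned int mss_clamp = user_mss < syn_mss ? user_mss : syn_mss;
	/* Payload left after bare 20-byte IP and TCP headers. */
	unsigned int mss_now = pmtu - 20 - 20;

	if (mss_now > mss_clamp)
		mss_now = mss_clamp;
	return mss_now;	/* still excludes TCP options, as noted above */
}
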
@@ -1171,7 +1171,7 @@ u32 __tcp_select_window(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	/* MSS for the peer's data. Previous verions used mss_clamp
+	/* MSS for the peer's data. Previous versions used mss_clamp
 	 * here. I don't know if the value based on our guesses
 	 * of peer's MSS is better for the performance. It's more correct
 	 * but may be worse for the performance because of rcv_mss
@@ -1361,7 +1361,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	int err;
 
 	/* Do not sent more than we queued. 1/4 is reserved for possible
-	 * copying overhead: frgagmentation, tunneling, mangling etc.
+	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
 	if (atomic_read(&sk->sk_wmem_alloc) >
 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 415ee47ac1c5..e1880959614a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -58,7 +58,7 @@ static void tcp_write_err(struct sock *sk)
  * to prevent DoS attacks. It is called when a retransmission timeout
  * or zero probe timeout occurs on orphaned socket.
  *
- * Criterium is still not confirmed experimentally and may change.
+ * Criteria is still not confirmed experimentally and may change.
  * We kill the socket, if:
  * 1. If number of orphaned sockets exceeds an administratively configured
  *    limit.
@@ -132,7 +132,7 @@ static int tcp_write_timeout(struct sock *sk)
 	   hole detection. :-(
 
 	   It is place to make it. It is not made. I do not want
-	   to make it. It is disguisting. It does not work in any
+	   to make it. It is disgusting. It does not work in any
 	   case. Let me to cite the same draft, which requires for
 	   us to implement this:
 