author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2007-04-21 01:18:02 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:29:34 -0400
commit	9e412ba7632f71259a53085665d4983b78257b7c (patch)
tree	b02d6df7e5357a741bf6d52a93e04a52b84f1f90 /net/ipv4/tcp_output.c
parent	38b4da383705394788aa09208917ba200792de4b (diff)
[TCP]: Sed magic converts func(sk, tp, ...) -> func(sk, ...)
This is (mostly) automated change using magic:

sed -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N'
    -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N'
    -e 's|struct sock \*sk,[\n\t ]*struct tcp_sock \*tp\([^{]*\n{\n\)| struct sock \*sk\1\tstruct tcp_sock *tp = tcp_sk(sk);\n|g'
    -e 's|struct sock \*sk, struct tcp_sock \*tp| struct sock \*sk|g'
    -e 's|sk, tp\([^-]\)|sk\1|g'

Fixed four unused variable (tp) warnings that were introduced.

In addition, manually added newlines after local variables and
tweaked function arguments positioning.

$ gcc --version
gcc (GCC) 4.1.1 20060525 (Red Hat 4.1.1-1)
...
$ codiff -fV built-in.o.old built-in.o.new
net/ipv4/route.c:
  rt_cache_flush |  +14
 1 function changed, 14 bytes added

net/ipv4/tcp.c:
  tcp_setsockopt |   -5
  tcp_sendpage   |  -25
  tcp_sendmsg    |  -16
 3 functions changed, 46 bytes removed

net/ipv4/tcp_input.c:
  tcp_try_undo_recovery |   +3
  tcp_try_undo_dsack    |   +2
  tcp_mark_head_lost    |  -12
  tcp_ack               |  -15
  tcp_event_data_recv   |  -32
  tcp_rcv_state_process |  -10
  tcp_rcv_established   |   +1
 7 functions changed, 6 bytes added, 69 bytes removed, diff: -63

net/ipv4/tcp_output.c:
  update_send_head          |   -9
  tcp_transmit_skb          |  +19
  tcp_cwnd_validate         |   +1
  tcp_write_wakeup          |  -17
  __tcp_push_pending_frames |  -25
  tcp_push_one              |   -8
  tcp_send_fin              |   -4
 7 functions changed, 20 bytes added, 63 bytes removed, diff: -43

built-in.o.new:
 18 functions changed, 40 bytes added, 178 bytes removed, diff: -138

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
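To make the mechanical conversion concrete, here is a minimal before/after sketch of an affected callee, modelled on update_send_head() from the diff below. The function names example_advance_before/_after are hypothetical and not part of this patch; only the dropped tp parameter and the added local follow the actual pattern.

/* Before: callers must pass both sk and the tcp_sock derived from it. */
static void example_advance_before(struct sock *sk, struct tcp_sock *tp,
				   struct sk_buff *skb)
{
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
}

/* After: only sk is passed; tp is re-derived locally via tcp_sk(), an
 * inline accessor, so the callee does essentially no extra work while
 * most call sites shrink (consistent with the codiff output above).
 */
static void example_advance_after(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
}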
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c  54
1 files changed, 29 insertions, 25 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 94d9f0c63682..3a60aea744ae 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -62,12 +62,13 @@ int sysctl_tcp_base_mss __read_mostly = 512;
 /* By default, RFC2861 behavior. */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
-static void update_send_head(struct sock *sk, struct tcp_sock *tp,
-			     struct sk_buff *skb)
+static void update_send_head(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	tcp_advance_send_head(sk, skb);
 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
-	tcp_packets_out_inc(sk, tp, skb);
+	tcp_packets_out_inc(sk, skb);
 }
 
 /* SND.NXT, if window was not shrunk.
@@ -76,8 +77,10 @@ static void update_send_head(struct sock *sk, struct tcp_sock *tp,
  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  * invalid. OK, let's make this for now:
  */
-static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
+static inline __u32 tcp_acceptable_seq(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
 		return tp->snd_nxt;
 	else
@@ -516,7 +519,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 					     md5 ? &md5_hash_location :
 #endif
 					     NULL);
-		TCP_ECN_send(sk, tp, skb, tcp_header_size);
+		TCP_ECN_send(sk, skb, tcp_header_size);
 	}
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -927,8 +930,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
 /* Congestion window validation. (RFC2861) */
 
-static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
+static void tcp_cwnd_validate(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out = tp->packets_out;
 
 	if (packets_out >= tp->snd_cwnd) {
@@ -1076,8 +1080,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
 	return cwnd_quota;
 }
 
-int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
+int tcp_may_send_now(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
 
 	return (skb &&
@@ -1144,8 +1149,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
  *
  * This algorithm is from John Heffner.
  */
-static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 send_win, cong_win, limit, in_flight;
 
@@ -1324,7 +1330,7 @@ static int tcp_mtu_probe(struct sock *sk)
 		/* Decrement cwnd here because we are sending
 		 * effectively two packets. */
 		tp->snd_cwnd--;
-		update_send_head(sk, tp, nskb);
+		update_send_head(sk, nskb);
 
 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
@@ -1387,7 +1393,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 						      nonagle : TCP_NAGLE_PUSH))))
 				break;
 		} else {
-			if (tcp_tso_should_defer(sk, tp, skb))
+			if (tcp_tso_should_defer(sk, skb))
 				break;
 		}
 
@@ -1416,14 +1422,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 		/* Advance the send_head. This one is sent out.
 		 * This call will increment packets_out.
 		 */
-		update_send_head(sk, tp, skb);
+		update_send_head(sk, skb);
 
 		tcp_minshall_update(tp, mss_now, skb);
 		sent_pkts++;
 	}
 
 	if (likely(sent_pkts)) {
-		tcp_cwnd_validate(sk, tp);
+		tcp_cwnd_validate(sk);
 		return 0;
 	}
 	return !tp->packets_out && tcp_send_head(sk);
@@ -1433,14 +1439,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
  * TCP_CORK or attempt at coalescing tiny packets.
  * The socket must be locked by the caller.
  */
-void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-			       unsigned int cur_mss, int nonagle)
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+			       int nonagle)
 {
 	struct sk_buff *skb = tcp_send_head(sk);
 
 	if (skb) {
 		if (tcp_write_xmit(sk, cur_mss, nonagle))
-			tcp_check_probe_timer(sk, tp);
+			tcp_check_probe_timer(sk);
 	}
 }
 
@@ -1484,8 +1490,8 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
 		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
-			update_send_head(sk, tp, skb);
-			tcp_cwnd_validate(sk, tp);
+			update_send_head(sk, skb);
+			tcp_cwnd_validate(sk);
 			return;
 		}
 	}
@@ -1933,7 +1939,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	 * segments to send.
 	 */
 
-	if (tcp_may_send_now(sk, tp))
+	if (tcp_may_send_now(sk))
 		return;
 
 	if (tp->forward_skb_hint) {
@@ -2023,7 +2029,7 @@ void tcp_send_fin(struct sock *sk)
 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
 		tcp_queue_skb(sk, skb);
 	}
-	__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
+	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
 }
 
 /* We get here when a process closes a file descriptor (either due to
@@ -2033,7 +2039,6 @@ void tcp_send_fin(struct sock *sk)
  */
 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
 	/* NOTE: No TCP options attached and we never retransmit this. */
@@ -2053,7 +2058,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	skb_shinfo(skb)->gso_type = 0;
 
 	/* Send it off. */
-	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
+	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2271,7 +2276,7 @@ int tcp_connect(struct sock *sk)
 	skb_reserve(buff, MAX_TCP_HEADER);
 
 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
-	TCP_ECN_send_syn(sk, tp, buff);
+	TCP_ECN_send_syn(sk, buff);
 	TCP_SKB_CB(buff)->sacked = 0;
 	skb_shinfo(buff)->gso_segs = 1;
 	skb_shinfo(buff)->gso_size = 0;
@@ -2363,7 +2368,6 @@ void tcp_send_ack(struct sock *sk)
 {
 	/* If we have been reset, we may not send again. */
 	if (sk->sk_state != TCP_CLOSE) {
-		struct tcp_sock *tp = tcp_sk(sk);
 		struct sk_buff *buff;
 
 		/* We are not putting this on the write queue, so
@@ -2389,7 +2393,7 @@ void tcp_send_ack(struct sock *sk)
 		skb_shinfo(buff)->gso_type = 0;
 
 		/* Send it off, this clears delayed acks for us. */
-		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
+		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
 		TCP_SKB_CB(buff)->when = tcp_time_stamp;
 		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
 	}
@@ -2467,7 +2471,7 @@ int tcp_write_wakeup(struct sock *sk)
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 		if (!err) {
-			update_send_head(sk, tp, skb);
+			update_send_head(sk, skb);
 		}
 		return err;
 	} else {
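Why the dropped tp argument is pure redundancy: tcp_sk() (already used throughout the patch) simply reinterprets the sock pointer, because struct tcp_sock embeds struct sock as its leading member through the inet/connection-sock layering. A rough sketch of the accessor as it appears in kernel headers of this era follows; the exact signature in include/linux/tcp.h may differ slightly, so treat it as illustrative.

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	/* struct tcp_sock starts with the embedded struct sock, so the
	 * conversion is a pointer cast resolved entirely at compile time.
	 */
	return (struct tcp_sock *)sk;
}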