author		Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2007-04-21 01:18:02 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:29:34 -0400
commit		9e412ba7632f71259a53085665d4983b78257b7c
tree		b02d6df7e5357a741bf6d52a93e04a52b84f1f90
parent		38b4da383705394788aa09208917ba200792de4b
[TCP]: Sed magic converts func(sk, tp, ...) -> func(sk, ...)
This is a (mostly) automated change using magic:
sed -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N'
    -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N'
    -e 's|struct sock \*sk,[\n\t ]*struct tcp_sock \*tp\([^{]*\n{\n\)|struct sock \*sk\1\tstruct tcp_sock *tp = tcp_sk(sk);\n|g'
    -e 's|struct sock \*sk, struct tcp_sock \*tp|struct sock \*sk|g' -e 's|sk, tp\([^-]\)|sk\1|g'
Fixed four unused variable (tp) warnings that were introduced.
In addition, manually added newlines after local variables and
tweaked function argument positioning.
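
For illustration, here is a minimal, compilable sketch of the shape of the conversion. The struct layouts and the tcp_sk() stand-in below are simplified stand-ins rather than the kernel's own definitions, and the function names are hypothetical:

/* Stand-in types: in the kernel, struct tcp_sock begins with the socket
 * fields, so tcp_sk() is effectively a cast from struct sock *.
 */
struct sock {
	int dummy;
};

struct tcp_sock {
	struct sock sk;
	int packets_out;
};

static inline struct tcp_sock *tcp_sk(struct sock *sk)
{
	return (struct tcp_sock *)sk;
}

/* Before the patch: callers had to pass both sk and tp. */
static void example_before(struct sock *sk, struct tcp_sock *tp)
{
	tp->packets_out++;
}

/* After the patch: tp is derived from sk at the top of the body,
 * and callers pass only sk.
 */
static void example_after(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out++;
}

Since tcp_sk() is a trivial inline, deriving tp locally costs little; the codiff numbers below show the net object-code change stays small.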
$ gcc --version
gcc (GCC) 4.1.1 20060525 (Red Hat 4.1.1-1)
...
$ codiff -fV built-in.o.old built-in.o.new
net/ipv4/route.c:
rt_cache_flush | +14
1 function changed, 14 bytes added
net/ipv4/tcp.c:
tcp_setsockopt | -5
tcp_sendpage | -25
tcp_sendmsg | -16
3 functions changed, 46 bytes removed
net/ipv4/tcp_input.c:
tcp_try_undo_recovery | +3
tcp_try_undo_dsack | +2
tcp_mark_head_lost | -12
tcp_ack | -15
tcp_event_data_recv | -32
tcp_rcv_state_process | -10
tcp_rcv_established | +1
7 functions changed, 6 bytes added, 69 bytes removed, diff: -63
net/ipv4/tcp_output.c:
update_send_head | -9
tcp_transmit_skb | +19
tcp_cwnd_validate | +1
tcp_write_wakeup | -17
__tcp_push_pending_frames | -25
tcp_push_one | -8
tcp_send_fin | -4
7 functions changed, 20 bytes added, 63 bytes removed, diff: -43
built-in.o.new:
18 functions changed, 40 bytes added, 178 bytes removed, diff: -138
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/tcp.h	 25
-rw-r--r--	include/net/tcp_ecn.h	 11
-rw-r--r--	net/ipv4/tcp.c	 39
-rw-r--r--	net/ipv4/tcp_input.c	145
-rw-r--r--	net/ipv4/tcp_output.c	 54
5 files changed, 158 insertions(+), 116 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e79803353c83..43910fe3c448 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -420,9 +420,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, | |||
420 | 420 | ||
421 | /* tcp_output.c */ | 421 | /* tcp_output.c */ |
422 | 422 | ||
423 | extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, | 423 | extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, |
424 | unsigned int cur_mss, int nonagle); | 424 | int nonagle); |
425 | extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp); | 425 | extern int tcp_may_send_now(struct sock *sk); |
426 | extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); | 426 | extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); |
427 | extern void tcp_xmit_retransmit_queue(struct sock *); | 427 | extern void tcp_xmit_retransmit_queue(struct sock *); |
428 | extern void tcp_simple_retransmit(struct sock *); | 428 | extern void tcp_simple_retransmit(struct sock *); |
@@ -479,8 +479,10 @@ static inline void tcp_fast_path_on(struct tcp_sock *tp) | |||
479 | __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale); | 479 | __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale); |
480 | } | 480 | } |
481 | 481 | ||
482 | static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp) | 482 | static inline void tcp_fast_path_check(struct sock *sk) |
483 | { | 483 | { |
484 | struct tcp_sock *tp = tcp_sk(sk); | ||
485 | |||
484 | if (skb_queue_empty(&tp->out_of_order_queue) && | 486 | if (skb_queue_empty(&tp->out_of_order_queue) && |
485 | tp->rcv_wnd && | 487 | tp->rcv_wnd && |
486 | atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && | 488 | atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && |
@@ -591,10 +593,10 @@ static inline void tcp_dec_pcount_approx(__u32 *count, | |||
591 | } | 593 | } |
592 | } | 594 | } |
593 | 595 | ||
594 | static inline void tcp_packets_out_inc(struct sock *sk, | 596 | static inline void tcp_packets_out_inc(struct sock *sk, |
595 | struct tcp_sock *tp, | ||
596 | const struct sk_buff *skb) | 597 | const struct sk_buff *skb) |
597 | { | 598 | { |
599 | struct tcp_sock *tp = tcp_sk(sk); | ||
598 | int orig = tp->packets_out; | 600 | int orig = tp->packets_out; |
599 | 601 | ||
600 | tp->packets_out += tcp_skb_pcount(skb); | 602 | tp->packets_out += tcp_skb_pcount(skb); |
@@ -778,18 +780,21 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, int mss, | |||
778 | tp->snd_sml = TCP_SKB_CB(skb)->end_seq; | 780 | tp->snd_sml = TCP_SKB_CB(skb)->end_seq; |
779 | } | 781 | } |
780 | 782 | ||
781 | static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp) | 783 | static inline void tcp_check_probe_timer(struct sock *sk) |
782 | { | 784 | { |
785 | struct tcp_sock *tp = tcp_sk(sk); | ||
783 | const struct inet_connection_sock *icsk = inet_csk(sk); | 786 | const struct inet_connection_sock *icsk = inet_csk(sk); |
787 | |||
784 | if (!tp->packets_out && !icsk->icsk_pending) | 788 | if (!tp->packets_out && !icsk->icsk_pending) |
785 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, | 789 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, |
786 | icsk->icsk_rto, TCP_RTO_MAX); | 790 | icsk->icsk_rto, TCP_RTO_MAX); |
787 | } | 791 | } |
788 | 792 | ||
789 | static inline void tcp_push_pending_frames(struct sock *sk, | 793 | static inline void tcp_push_pending_frames(struct sock *sk) |
790 | struct tcp_sock *tp) | ||
791 | { | 794 | { |
792 | __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle); | 795 | struct tcp_sock *tp = tcp_sk(sk); |
796 | |||
797 | __tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle); | ||
793 | } | 798 | } |
794 | 799 | ||
795 | static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq) | 800 | static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq) |
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index b5f7c6ac0880..89eb3e05116d 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -27,9 +27,10 @@ static inline void TCP_ECN_send_synack(struct tcp_sock *tp, | |||
27 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; | 27 | TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; |
28 | } | 28 | } |
29 | 29 | ||
30 | static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp, | 30 | static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) |
31 | struct sk_buff *skb) | ||
32 | { | 31 | { |
32 | struct tcp_sock *tp = tcp_sk(sk); | ||
33 | |||
33 | tp->ecn_flags = 0; | 34 | tp->ecn_flags = 0; |
34 | if (sysctl_tcp_ecn) { | 35 | if (sysctl_tcp_ecn) { |
35 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; | 36 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; |
@@ -44,9 +45,11 @@ TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) | |||
44 | th->ece = 1; | 45 | th->ece = 1; |
45 | } | 46 | } |
46 | 47 | ||
47 | static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp, | 48 | static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, |
48 | struct sk_buff *skb, int tcp_header_len) | 49 | int tcp_header_len) |
49 | { | 50 | { |
51 | struct tcp_sock *tp = tcp_sk(sk); | ||
52 | |||
50 | if (tp->ecn_flags & TCP_ECN_OK) { | 53 | if (tp->ecn_flags & TCP_ECN_OK) { |
51 | /* Not-retransmitted data segment: set ECT and inject CWR. */ | 54 | /* Not-retransmitted data segment: set ECT and inject CWR. */ |
52 | if (skb->len != tcp_header_len && | 55 | if (skb->len != tcp_header_len && |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 99ad52c00c96..2cf9a898ce50 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -460,9 +460,9 @@ static inline int forced_push(struct tcp_sock *tp) | |||
460 | return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); | 460 | return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); |
461 | } | 461 | } |
462 | 462 | ||
463 | static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, | 463 | static inline void skb_entail(struct sock *sk, struct sk_buff *skb) |
464 | struct sk_buff *skb) | ||
465 | { | 464 | { |
465 | struct tcp_sock *tp = tcp_sk(sk); | ||
466 | struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); | 466 | struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); |
467 | 467 | ||
468 | skb->csum = 0; | 468 | skb->csum = 0; |
@@ -486,15 +486,17 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, | |||
486 | } | 486 | } |
487 | } | 487 | } |
488 | 488 | ||
489 | static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags, | 489 | static inline void tcp_push(struct sock *sk, int flags, int mss_now, |
490 | int mss_now, int nonagle) | 490 | int nonagle) |
491 | { | 491 | { |
492 | struct tcp_sock *tp = tcp_sk(sk); | ||
493 | |||
492 | if (tcp_send_head(sk)) { | 494 | if (tcp_send_head(sk)) { |
493 | struct sk_buff *skb = tcp_write_queue_tail(sk); | 495 | struct sk_buff *skb = tcp_write_queue_tail(sk); |
494 | if (!(flags & MSG_MORE) || forced_push(tp)) | 496 | if (!(flags & MSG_MORE) || forced_push(tp)) |
495 | tcp_mark_push(tp, skb); | 497 | tcp_mark_push(tp, skb); |
496 | tcp_mark_urg(tp, flags, skb); | 498 | tcp_mark_urg(tp, flags, skb); |
497 | __tcp_push_pending_frames(sk, tp, mss_now, | 499 | __tcp_push_pending_frames(sk, mss_now, |
498 | (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); | 500 | (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); |
499 | } | 501 | } |
500 | } | 502 | } |
@@ -540,7 +542,7 @@ new_segment: | |||
540 | if (!skb) | 542 | if (!skb) |
541 | goto wait_for_memory; | 543 | goto wait_for_memory; |
542 | 544 | ||
543 | skb_entail(sk, tp, skb); | 545 | skb_entail(sk, skb); |
544 | copy = size_goal; | 546 | copy = size_goal; |
545 | } | 547 | } |
546 | 548 | ||
@@ -586,7 +588,7 @@ new_segment: | |||
586 | 588 | ||
587 | if (forced_push(tp)) { | 589 | if (forced_push(tp)) { |
588 | tcp_mark_push(tp, skb); | 590 | tcp_mark_push(tp, skb); |
589 | __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); | 591 | __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); |
590 | } else if (skb == tcp_send_head(sk)) | 592 | } else if (skb == tcp_send_head(sk)) |
591 | tcp_push_one(sk, mss_now); | 593 | tcp_push_one(sk, mss_now); |
592 | continue; | 594 | continue; |
@@ -595,7 +597,7 @@ wait_for_sndbuf: | |||
595 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 597 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
596 | wait_for_memory: | 598 | wait_for_memory: |
597 | if (copied) | 599 | if (copied) |
598 | tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); | 600 | tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); |
599 | 601 | ||
600 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) | 602 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) |
601 | goto do_error; | 603 | goto do_error; |
@@ -606,7 +608,7 @@ wait_for_memory: | |||
606 | 608 | ||
607 | out: | 609 | out: |
608 | if (copied) | 610 | if (copied) |
609 | tcp_push(sk, tp, flags, mss_now, tp->nonagle); | 611 | tcp_push(sk, flags, mss_now, tp->nonagle); |
610 | return copied; | 612 | return copied; |
611 | 613 | ||
612 | do_error: | 614 | do_error: |
@@ -637,8 +639,9 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, | |||
637 | #define TCP_PAGE(sk) (sk->sk_sndmsg_page) | 639 | #define TCP_PAGE(sk) (sk->sk_sndmsg_page) |
638 | #define TCP_OFF(sk) (sk->sk_sndmsg_off) | 640 | #define TCP_OFF(sk) (sk->sk_sndmsg_off) |
639 | 641 | ||
640 | static inline int select_size(struct sock *sk, struct tcp_sock *tp) | 642 | static inline int select_size(struct sock *sk) |
641 | { | 643 | { |
644 | struct tcp_sock *tp = tcp_sk(sk); | ||
642 | int tmp = tp->mss_cache; | 645 | int tmp = tp->mss_cache; |
643 | 646 | ||
644 | if (sk->sk_route_caps & NETIF_F_SG) { | 647 | if (sk->sk_route_caps & NETIF_F_SG) { |
@@ -714,7 +717,7 @@ new_segment: | |||
714 | if (!sk_stream_memory_free(sk)) | 717 | if (!sk_stream_memory_free(sk)) |
715 | goto wait_for_sndbuf; | 718 | goto wait_for_sndbuf; |
716 | 719 | ||
717 | skb = sk_stream_alloc_pskb(sk, select_size(sk, tp), | 720 | skb = sk_stream_alloc_pskb(sk, select_size(sk), |
718 | 0, sk->sk_allocation); | 721 | 0, sk->sk_allocation); |
719 | if (!skb) | 722 | if (!skb) |
720 | goto wait_for_memory; | 723 | goto wait_for_memory; |
@@ -725,7 +728,7 @@ new_segment: | |||
725 | if (sk->sk_route_caps & NETIF_F_ALL_CSUM) | 728 | if (sk->sk_route_caps & NETIF_F_ALL_CSUM) |
726 | skb->ip_summed = CHECKSUM_PARTIAL; | 729 | skb->ip_summed = CHECKSUM_PARTIAL; |
727 | 730 | ||
728 | skb_entail(sk, tp, skb); | 731 | skb_entail(sk, skb); |
729 | copy = size_goal; | 732 | copy = size_goal; |
730 | } | 733 | } |
731 | 734 | ||
@@ -830,7 +833,7 @@ new_segment: | |||
830 | 833 | ||
831 | if (forced_push(tp)) { | 834 | if (forced_push(tp)) { |
832 | tcp_mark_push(tp, skb); | 835 | tcp_mark_push(tp, skb); |
833 | __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); | 836 | __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); |
834 | } else if (skb == tcp_send_head(sk)) | 837 | } else if (skb == tcp_send_head(sk)) |
835 | tcp_push_one(sk, mss_now); | 838 | tcp_push_one(sk, mss_now); |
836 | continue; | 839 | continue; |
@@ -839,7 +842,7 @@ wait_for_sndbuf: | |||
839 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 842 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
840 | wait_for_memory: | 843 | wait_for_memory: |
841 | if (copied) | 844 | if (copied) |
842 | tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); | 845 | tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); |
843 | 846 | ||
844 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) | 847 | if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) |
845 | goto do_error; | 848 | goto do_error; |
@@ -851,7 +854,7 @@ wait_for_memory: | |||
851 | 854 | ||
852 | out: | 855 | out: |
853 | if (copied) | 856 | if (copied) |
854 | tcp_push(sk, tp, flags, mss_now, tp->nonagle); | 857 | tcp_push(sk, flags, mss_now, tp->nonagle); |
855 | TCP_CHECK_TIMER(sk); | 858 | TCP_CHECK_TIMER(sk); |
856 | release_sock(sk); | 859 | release_sock(sk); |
857 | return copied; | 860 | return copied; |
@@ -1389,7 +1392,7 @@ do_prequeue: | |||
1389 | skip_copy: | 1392 | skip_copy: |
1390 | if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { | 1393 | if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { |
1391 | tp->urg_data = 0; | 1394 | tp->urg_data = 0; |
1392 | tcp_fast_path_check(sk, tp); | 1395 | tcp_fast_path_check(sk); |
1393 | } | 1396 | } |
1394 | if (used + offset < skb->len) | 1397 | if (used + offset < skb->len) |
1395 | continue; | 1398 | continue; |
@@ -1830,7 +1833,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
1830 | * for currently queued segments. | 1833 | * for currently queued segments. |
1831 | */ | 1834 | */ |
1832 | tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; | 1835 | tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; |
1833 | tcp_push_pending_frames(sk, tp); | 1836 | tcp_push_pending_frames(sk); |
1834 | } else { | 1837 | } else { |
1835 | tp->nonagle &= ~TCP_NAGLE_OFF; | 1838 | tp->nonagle &= ~TCP_NAGLE_OFF; |
1836 | } | 1839 | } |
@@ -1854,7 +1857,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
1854 | tp->nonagle &= ~TCP_NAGLE_CORK; | 1857 | tp->nonagle &= ~TCP_NAGLE_CORK; |
1855 | if (tp->nonagle&TCP_NAGLE_OFF) | 1858 | if (tp->nonagle&TCP_NAGLE_OFF) |
1856 | tp->nonagle |= TCP_NAGLE_PUSH; | 1859 | tp->nonagle |= TCP_NAGLE_PUSH; |
1857 | tcp_push_pending_frames(sk, tp); | 1860 | tcp_push_pending_frames(sk); |
1858 | } | 1861 | } |
1859 | break; | 1862 | break; |
1860 | 1863 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2fbfc2e4209c..633389390788 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -235,9 +235,9 @@ static void tcp_fixup_sndbuf(struct sock *sk) | |||
235 | */ | 235 | */ |
236 | 236 | ||
237 | /* Slow part of check#2. */ | 237 | /* Slow part of check#2. */ |
238 | static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp, | 238 | static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) |
239 | const struct sk_buff *skb) | ||
240 | { | 239 | { |
240 | struct tcp_sock *tp = tcp_sk(sk); | ||
241 | /* Optimize this! */ | 241 | /* Optimize this! */ |
242 | int truesize = tcp_win_from_space(skb->truesize)/2; | 242 | int truesize = tcp_win_from_space(skb->truesize)/2; |
243 | int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2; | 243 | int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2; |
@@ -252,9 +252,11 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp, | |||
252 | return 0; | 252 | return 0; |
253 | } | 253 | } |
254 | 254 | ||
255 | static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp, | 255 | static void tcp_grow_window(struct sock *sk, |
256 | struct sk_buff *skb) | 256 | struct sk_buff *skb) |
257 | { | 257 | { |
258 | struct tcp_sock *tp = tcp_sk(sk); | ||
259 | |||
258 | /* Check #1 */ | 260 | /* Check #1 */ |
259 | if (tp->rcv_ssthresh < tp->window_clamp && | 261 | if (tp->rcv_ssthresh < tp->window_clamp && |
260 | (int)tp->rcv_ssthresh < tcp_space(sk) && | 262 | (int)tp->rcv_ssthresh < tcp_space(sk) && |
@@ -267,7 +269,7 @@ static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp, | |||
267 | if (tcp_win_from_space(skb->truesize) <= skb->len) | 269 | if (tcp_win_from_space(skb->truesize) <= skb->len) |
268 | incr = 2*tp->advmss; | 270 | incr = 2*tp->advmss; |
269 | else | 271 | else |
270 | incr = __tcp_grow_window(sk, tp, skb); | 272 | incr = __tcp_grow_window(sk, skb); |
271 | 273 | ||
272 | if (incr) { | 274 | if (incr) { |
273 | tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); | 275 | tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); |
@@ -330,8 +332,9 @@ static void tcp_init_buffer_space(struct sock *sk) | |||
330 | } | 332 | } |
331 | 333 | ||
332 | /* 5. Recalculate window clamp after socket hit its memory bounds. */ | 334 | /* 5. Recalculate window clamp after socket hit its memory bounds. */ |
333 | static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) | 335 | static void tcp_clamp_window(struct sock *sk) |
334 | { | 336 | { |
337 | struct tcp_sock *tp = tcp_sk(sk); | ||
335 | struct inet_connection_sock *icsk = inet_csk(sk); | 338 | struct inet_connection_sock *icsk = inet_csk(sk); |
336 | 339 | ||
337 | icsk->icsk_ack.quick = 0; | 340 | icsk->icsk_ack.quick = 0; |
@@ -503,8 +506,9 @@ new_measure: | |||
503 | * each ACK we send, he increments snd_cwnd and transmits more of his | 506 | * each ACK we send, he increments snd_cwnd and transmits more of his |
504 | * queue. -DaveM | 507 | * queue. -DaveM |
505 | */ | 508 | */ |
506 | static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) | 509 | static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) |
507 | { | 510 | { |
511 | struct tcp_sock *tp = tcp_sk(sk); | ||
508 | struct inet_connection_sock *icsk = inet_csk(sk); | 512 | struct inet_connection_sock *icsk = inet_csk(sk); |
509 | u32 now; | 513 | u32 now; |
510 | 514 | ||
@@ -545,7 +549,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_ | |||
545 | TCP_ECN_check_ce(tp, skb); | 549 | TCP_ECN_check_ce(tp, skb); |
546 | 550 | ||
547 | if (skb->len >= 128) | 551 | if (skb->len >= 128) |
548 | tcp_grow_window(sk, tp, skb); | 552 | tcp_grow_window(sk, skb); |
549 | } | 553 | } |
550 | 554 | ||
551 | /* Called to compute a smoothed rtt estimate. The data fed to this | 555 | /* Called to compute a smoothed rtt estimate. The data fed to this |
@@ -1541,8 +1545,10 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) | |||
1541 | return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); | 1545 | return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); |
1542 | } | 1546 | } |
1543 | 1547 | ||
1544 | static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) | 1548 | static inline int tcp_head_timedout(struct sock *sk) |
1545 | { | 1549 | { |
1550 | struct tcp_sock *tp = tcp_sk(sk); | ||
1551 | |||
1546 | return tp->packets_out && | 1552 | return tp->packets_out && |
1547 | tcp_skb_timedout(sk, tcp_write_queue_head(sk)); | 1553 | tcp_skb_timedout(sk, tcp_write_queue_head(sk)); |
1548 | } | 1554 | } |
@@ -1640,8 +1646,9 @@ static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) | |||
1640 | * Main question: may we further continue forward transmission | 1646 | * Main question: may we further continue forward transmission |
1641 | * with the same cwnd? | 1647 | * with the same cwnd? |
1642 | */ | 1648 | */ |
1643 | static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp) | 1649 | static int tcp_time_to_recover(struct sock *sk) |
1644 | { | 1650 | { |
1651 | struct tcp_sock *tp = tcp_sk(sk); | ||
1645 | __u32 packets_out; | 1652 | __u32 packets_out; |
1646 | 1653 | ||
1647 | /* Do not perform any recovery during FRTO algorithm */ | 1654 | /* Do not perform any recovery during FRTO algorithm */ |
@@ -1659,7 +1666,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp) | |||
1659 | /* Trick#3 : when we use RFC2988 timer restart, fast | 1666 | /* Trick#3 : when we use RFC2988 timer restart, fast |
1660 | * retransmit can be triggered by timeout of queue head. | 1667 | * retransmit can be triggered by timeout of queue head. |
1661 | */ | 1668 | */ |
1662 | if (tcp_head_timedout(sk, tp)) | 1669 | if (tcp_head_timedout(sk)) |
1663 | return 1; | 1670 | return 1; |
1664 | 1671 | ||
1665 | /* Trick#4: It is still not OK... But will it be useful to delay | 1672 | /* Trick#4: It is still not OK... But will it be useful to delay |
@@ -1668,7 +1675,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp) | |||
1668 | packets_out = tp->packets_out; | 1675 | packets_out = tp->packets_out; |
1669 | if (packets_out <= tp->reordering && | 1676 | if (packets_out <= tp->reordering && |
1670 | tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && | 1677 | tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && |
1671 | !tcp_may_send_now(sk, tp)) { | 1678 | !tcp_may_send_now(sk)) { |
1672 | /* We have nothing to send. This connection is limited | 1679 | /* We have nothing to send. This connection is limited |
1673 | * either by receiver window or by application. | 1680 | * either by receiver window or by application. |
1674 | */ | 1681 | */ |
@@ -1708,8 +1715,10 @@ static void tcp_add_reno_sack(struct sock *sk) | |||
1708 | 1715 | ||
1709 | /* Account for ACK, ACKing some data in Reno Recovery phase. */ | 1716 | /* Account for ACK, ACKing some data in Reno Recovery phase. */ |
1710 | 1717 | ||
1711 | static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked) | 1718 | static void tcp_remove_reno_sacks(struct sock *sk, int acked) |
1712 | { | 1719 | { |
1720 | struct tcp_sock *tp = tcp_sk(sk); | ||
1721 | |||
1713 | if (acked > 0) { | 1722 | if (acked > 0) { |
1714 | /* One ACK acked hole. The rest eat duplicate ACKs. */ | 1723 | /* One ACK acked hole. The rest eat duplicate ACKs. */ |
1715 | if (acked-1 >= tp->sacked_out) | 1724 | if (acked-1 >= tp->sacked_out) |
@@ -1728,9 +1737,10 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp) | |||
1728 | } | 1737 | } |
1729 | 1738 | ||
1730 | /* Mark head of queue up as lost. */ | 1739 | /* Mark head of queue up as lost. */ |
1731 | static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp, | 1740 | static void tcp_mark_head_lost(struct sock *sk, |
1732 | int packets, u32 high_seq) | 1741 | int packets, u32 high_seq) |
1733 | { | 1742 | { |
1743 | struct tcp_sock *tp = tcp_sk(sk); | ||
1734 | struct sk_buff *skb; | 1744 | struct sk_buff *skb; |
1735 | int cnt; | 1745 | int cnt; |
1736 | 1746 | ||
@@ -1771,15 +1781,17 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp, | |||
1771 | 1781 | ||
1772 | /* Account newly detected lost packet(s) */ | 1782 | /* Account newly detected lost packet(s) */ |
1773 | 1783 | ||
1774 | static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) | 1784 | static void tcp_update_scoreboard(struct sock *sk) |
1775 | { | 1785 | { |
1786 | struct tcp_sock *tp = tcp_sk(sk); | ||
1787 | |||
1776 | if (IsFack(tp)) { | 1788 | if (IsFack(tp)) { |
1777 | int lost = tp->fackets_out - tp->reordering; | 1789 | int lost = tp->fackets_out - tp->reordering; |
1778 | if (lost <= 0) | 1790 | if (lost <= 0) |
1779 | lost = 1; | 1791 | lost = 1; |
1780 | tcp_mark_head_lost(sk, tp, lost, tp->high_seq); | 1792 | tcp_mark_head_lost(sk, lost, tp->high_seq); |
1781 | } else { | 1793 | } else { |
1782 | tcp_mark_head_lost(sk, tp, 1, tp->high_seq); | 1794 | tcp_mark_head_lost(sk, 1, tp->high_seq); |
1783 | } | 1795 | } |
1784 | 1796 | ||
1785 | /* New heuristics: it is possible only after we switched | 1797 | /* New heuristics: it is possible only after we switched |
@@ -1787,7 +1799,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) | |||
1787 | * Hence, we can detect timed out packets during fast | 1799 | * Hence, we can detect timed out packets during fast |
1788 | * retransmit without falling to slow start. | 1800 | * retransmit without falling to slow start. |
1789 | */ | 1801 | */ |
1790 | if (!IsReno(tp) && tcp_head_timedout(sk, tp)) { | 1802 | if (!IsReno(tp) && tcp_head_timedout(sk)) { |
1791 | struct sk_buff *skb; | 1803 | struct sk_buff *skb; |
1792 | 1804 | ||
1793 | skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint | 1805 | skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint |
@@ -1867,9 +1879,11 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp) | |||
1867 | /* Undo procedures. */ | 1879 | /* Undo procedures. */ |
1868 | 1880 | ||
1869 | #if FASTRETRANS_DEBUG > 1 | 1881 | #if FASTRETRANS_DEBUG > 1 |
1870 | static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg) | 1882 | static void DBGUNDO(struct sock *sk, const char *msg) |
1871 | { | 1883 | { |
1884 | struct tcp_sock *tp = tcp_sk(sk); | ||
1872 | struct inet_sock *inet = inet_sk(sk); | 1885 | struct inet_sock *inet = inet_sk(sk); |
1886 | |||
1873 | printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n", | 1887 | printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n", |
1874 | msg, | 1888 | msg, |
1875 | NIPQUAD(inet->daddr), ntohs(inet->dport), | 1889 | NIPQUAD(inet->daddr), ntohs(inet->dport), |
@@ -1915,13 +1929,15 @@ static inline int tcp_may_undo(struct tcp_sock *tp) | |||
1915 | } | 1929 | } |
1916 | 1930 | ||
1917 | /* People celebrate: "We love our President!" */ | 1931 | /* People celebrate: "We love our President!" */ |
1918 | static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp) | 1932 | static int tcp_try_undo_recovery(struct sock *sk) |
1919 | { | 1933 | { |
1934 | struct tcp_sock *tp = tcp_sk(sk); | ||
1935 | |||
1920 | if (tcp_may_undo(tp)) { | 1936 | if (tcp_may_undo(tp)) { |
1921 | /* Happy end! We did not retransmit anything | 1937 | /* Happy end! We did not retransmit anything |
1922 | * or our original transmission succeeded. | 1938 | * or our original transmission succeeded. |
1923 | */ | 1939 | */ |
1924 | DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); | 1940 | DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); |
1925 | tcp_undo_cwr(sk, 1); | 1941 | tcp_undo_cwr(sk, 1); |
1926 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) | 1942 | if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) |
1927 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); | 1943 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); |
@@ -1941,10 +1957,12 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp) | |||
1941 | } | 1957 | } |
1942 | 1958 | ||
1943 | /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ | 1959 | /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ |
1944 | static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp) | 1960 | static void tcp_try_undo_dsack(struct sock *sk) |
1945 | { | 1961 | { |
1962 | struct tcp_sock *tp = tcp_sk(sk); | ||
1963 | |||
1946 | if (tp->undo_marker && !tp->undo_retrans) { | 1964 | if (tp->undo_marker && !tp->undo_retrans) { |
1947 | DBGUNDO(sk, tp, "D-SACK"); | 1965 | DBGUNDO(sk, "D-SACK"); |
1948 | tcp_undo_cwr(sk, 1); | 1966 | tcp_undo_cwr(sk, 1); |
1949 | tp->undo_marker = 0; | 1967 | tp->undo_marker = 0; |
1950 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO); | 1968 | NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO); |
@@ -1953,9 +1971,9 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp) | |||
1953 | 1971 | ||
1954 | /* Undo during fast recovery after partial ACK. */ | 1972 | /* Undo during fast recovery after partial ACK. */ |
1955 | 1973 | ||
1956 | static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp, | 1974 | static int tcp_try_undo_partial(struct sock *sk, int acked) |
1957 | int acked) | ||
1958 | { | 1975 | { |
1976 | struct tcp_sock *tp = tcp_sk(sk); | ||
1959 | /* Partial ACK arrived. Force Hoe's retransmit. */ | 1977 | /* Partial ACK arrived. Force Hoe's retransmit. */ |
1960 | int failed = IsReno(tp) || tp->fackets_out>tp->reordering; | 1978 | int failed = IsReno(tp) || tp->fackets_out>tp->reordering; |
1961 | 1979 | ||
@@ -1968,7 +1986,7 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp, | |||
1968 | 1986 | ||
1969 | tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); | 1987 | tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); |
1970 | 1988 | ||
1971 | DBGUNDO(sk, tp, "Hoe"); | 1989 | DBGUNDO(sk, "Hoe"); |
1972 | tcp_undo_cwr(sk, 0); | 1990 | tcp_undo_cwr(sk, 0); |
1973 | NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO); | 1991 | NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO); |
1974 | 1992 | ||
@@ -1982,8 +2000,10 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp, | |||
1982 | } | 2000 | } |
1983 | 2001 | ||
1984 | /* Undo during loss recovery after partial ACK. */ | 2002 | /* Undo during loss recovery after partial ACK. */ |
1985 | static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) | 2003 | static int tcp_try_undo_loss(struct sock *sk) |
1986 | { | 2004 | { |
2005 | struct tcp_sock *tp = tcp_sk(sk); | ||
2006 | |||
1987 | if (tcp_may_undo(tp)) { | 2007 | if (tcp_may_undo(tp)) { |
1988 | struct sk_buff *skb; | 2008 | struct sk_buff *skb; |
1989 | tcp_for_write_queue(skb, sk) { | 2009 | tcp_for_write_queue(skb, sk) { |
@@ -1994,7 +2014,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) | |||
1994 | 2014 | ||
1995 | clear_all_retrans_hints(tp); | 2015 | clear_all_retrans_hints(tp); |
1996 | 2016 | ||
1997 | DBGUNDO(sk, tp, "partial loss"); | 2017 | DBGUNDO(sk, "partial loss"); |
1998 | tp->lost_out = 0; | 2018 | tp->lost_out = 0; |
1999 | tp->left_out = tp->sacked_out; | 2019 | tp->left_out = tp->sacked_out; |
2000 | tcp_undo_cwr(sk, 1); | 2020 | tcp_undo_cwr(sk, 1); |
@@ -2016,8 +2036,10 @@ static inline void tcp_complete_cwr(struct sock *sk) | |||
2016 | tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); | 2036 | tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); |
2017 | } | 2037 | } |
2018 | 2038 | ||
2019 | static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag) | 2039 | static void tcp_try_to_open(struct sock *sk, int flag) |
2020 | { | 2040 | { |
2041 | struct tcp_sock *tp = tcp_sk(sk); | ||
2042 | |||
2021 | tp->left_out = tp->sacked_out; | 2043 | tp->left_out = tp->sacked_out; |
2022 | 2044 | ||
2023 | if (tp->retrans_out == 0) | 2045 | if (tp->retrans_out == 0) |
@@ -2111,7 +2133,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2111 | before(tp->snd_una, tp->high_seq) && | 2133 | before(tp->snd_una, tp->high_seq) && |
2112 | icsk->icsk_ca_state != TCP_CA_Open && | 2134 | icsk->icsk_ca_state != TCP_CA_Open && |
2113 | tp->fackets_out > tp->reordering) { | 2135 | tp->fackets_out > tp->reordering) { |
2114 | tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq); | 2136 | tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, tp->high_seq); |
2115 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); | 2137 | NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); |
2116 | } | 2138 | } |
2117 | 2139 | ||
@@ -2127,7 +2149,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2127 | switch (icsk->icsk_ca_state) { | 2149 | switch (icsk->icsk_ca_state) { |
2128 | case TCP_CA_Loss: | 2150 | case TCP_CA_Loss: |
2129 | icsk->icsk_retransmits = 0; | 2151 | icsk->icsk_retransmits = 0; |
2130 | if (tcp_try_undo_recovery(sk, tp)) | 2152 | if (tcp_try_undo_recovery(sk)) |
2131 | return; | 2153 | return; |
2132 | break; | 2154 | break; |
2133 | 2155 | ||
@@ -2141,7 +2163,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2141 | break; | 2163 | break; |
2142 | 2164 | ||
2143 | case TCP_CA_Disorder: | 2165 | case TCP_CA_Disorder: |
2144 | tcp_try_undo_dsack(sk, tp); | 2166 | tcp_try_undo_dsack(sk); |
2145 | if (!tp->undo_marker || | 2167 | if (!tp->undo_marker || |
2146 | /* For SACK case do not Open to allow to undo | 2168 | /* For SACK case do not Open to allow to undo |
2147 | * catching for all duplicate ACKs. */ | 2169 | * catching for all duplicate ACKs. */ |
@@ -2154,7 +2176,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2154 | case TCP_CA_Recovery: | 2176 | case TCP_CA_Recovery: |
2155 | if (IsReno(tp)) | 2177 | if (IsReno(tp)) |
2156 | tcp_reset_reno_sack(tp); | 2178 | tcp_reset_reno_sack(tp); |
2157 | if (tcp_try_undo_recovery(sk, tp)) | 2179 | if (tcp_try_undo_recovery(sk)) |
2158 | return; | 2180 | return; |
2159 | tcp_complete_cwr(sk); | 2181 | tcp_complete_cwr(sk); |
2160 | break; | 2182 | break; |
@@ -2170,14 +2192,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2170 | } else { | 2192 | } else { |
2171 | int acked = prior_packets - tp->packets_out; | 2193 | int acked = prior_packets - tp->packets_out; |
2172 | if (IsReno(tp)) | 2194 | if (IsReno(tp)) |
2173 | tcp_remove_reno_sacks(sk, tp, acked); | 2195 | tcp_remove_reno_sacks(sk, acked); |
2174 | is_dupack = tcp_try_undo_partial(sk, tp, acked); | 2196 | is_dupack = tcp_try_undo_partial(sk, acked); |
2175 | } | 2197 | } |
2176 | break; | 2198 | break; |
2177 | case TCP_CA_Loss: | 2199 | case TCP_CA_Loss: |
2178 | if (flag&FLAG_DATA_ACKED) | 2200 | if (flag&FLAG_DATA_ACKED) |
2179 | icsk->icsk_retransmits = 0; | 2201 | icsk->icsk_retransmits = 0; |
2180 | if (!tcp_try_undo_loss(sk, tp)) { | 2202 | if (!tcp_try_undo_loss(sk)) { |
2181 | tcp_moderate_cwnd(tp); | 2203 | tcp_moderate_cwnd(tp); |
2182 | tcp_xmit_retransmit_queue(sk); | 2204 | tcp_xmit_retransmit_queue(sk); |
2183 | return; | 2205 | return; |
@@ -2194,10 +2216,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2194 | } | 2216 | } |
2195 | 2217 | ||
2196 | if (icsk->icsk_ca_state == TCP_CA_Disorder) | 2218 | if (icsk->icsk_ca_state == TCP_CA_Disorder) |
2197 | tcp_try_undo_dsack(sk, tp); | 2219 | tcp_try_undo_dsack(sk); |
2198 | 2220 | ||
2199 | if (!tcp_time_to_recover(sk, tp)) { | 2221 | if (!tcp_time_to_recover(sk)) { |
2200 | tcp_try_to_open(sk, tp, flag); | 2222 | tcp_try_to_open(sk, flag); |
2201 | return; | 2223 | return; |
2202 | } | 2224 | } |
2203 | 2225 | ||
@@ -2236,8 +2258,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
2236 | tcp_set_ca_state(sk, TCP_CA_Recovery); | 2258 | tcp_set_ca_state(sk, TCP_CA_Recovery); |
2237 | } | 2259 | } |
2238 | 2260 | ||
2239 | if (is_dupack || tcp_head_timedout(sk, tp)) | 2261 | if (is_dupack || tcp_head_timedout(sk)) |
2240 | tcp_update_scoreboard(sk, tp); | 2262 | tcp_update_scoreboard(sk); |
2241 | tcp_cwnd_down(sk); | 2263 | tcp_cwnd_down(sk); |
2242 | tcp_xmit_retransmit_queue(sk); | 2264 | tcp_xmit_retransmit_queue(sk); |
2243 | } | 2265 | } |
@@ -2313,8 +2335,10 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, | |||
2313 | * RFC2988 recommends to restart timer to now+rto. | 2335 | * RFC2988 recommends to restart timer to now+rto. |
2314 | */ | 2336 | */ |
2315 | 2337 | ||
2316 | static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) | 2338 | static void tcp_ack_packets_out(struct sock *sk) |
2317 | { | 2339 | { |
2340 | struct tcp_sock *tp = tcp_sk(sk); | ||
2341 | |||
2318 | if (!tp->packets_out) { | 2342 | if (!tp->packets_out) { |
2319 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); | 2343 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); |
2320 | } else { | 2344 | } else { |
@@ -2471,7 +2495,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) | |||
2471 | 2495 | ||
2472 | if (acked&FLAG_ACKED) { | 2496 | if (acked&FLAG_ACKED) { |
2473 | tcp_ack_update_rtt(sk, acked, seq_rtt); | 2497 | tcp_ack_update_rtt(sk, acked, seq_rtt); |
2474 | tcp_ack_packets_out(sk, tp); | 2498 | tcp_ack_packets_out(sk); |
2475 | if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED)) | 2499 | if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED)) |
2476 | (*rtt_sample)(sk, tcp_usrtt(&tv)); | 2500 | (*rtt_sample)(sk, tcp_usrtt(&tv)); |
2477 | 2501 | ||
@@ -2556,9 +2580,10 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack | |||
2556 | * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 | 2580 | * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 |
2557 | * and in FreeBSD. NetBSD's one is even worse.) is wrong. | 2581 | * and in FreeBSD. NetBSD's one is even worse.) is wrong. |
2558 | */ | 2582 | */ |
2559 | static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, | 2583 | static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack, |
2560 | struct sk_buff *skb, u32 ack, u32 ack_seq) | 2584 | u32 ack_seq) |
2561 | { | 2585 | { |
2586 | struct tcp_sock *tp = tcp_sk(sk); | ||
2562 | int flag = 0; | 2587 | int flag = 0; |
2563 | u32 nwin = ntohs(tcp_hdr(skb)->window); | 2588 | u32 nwin = ntohs(tcp_hdr(skb)->window); |
2564 | 2589 | ||
@@ -2576,7 +2601,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, | |||
2576 | * fast path is recovered for sending TCP. | 2601 | * fast path is recovered for sending TCP. |
2577 | */ | 2602 | */ |
2578 | tp->pred_flags = 0; | 2603 | tp->pred_flags = 0; |
2579 | tcp_fast_path_check(sk, tp); | 2604 | tcp_fast_path_check(sk); |
2580 | 2605 | ||
2581 | if (nwin > tp->max_window) { | 2606 | if (nwin > tp->max_window) { |
2582 | tp->max_window = nwin; | 2607 | tp->max_window = nwin; |
@@ -2762,7 +2787,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) | |||
2762 | else | 2787 | else |
2763 | NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS); | 2788 | NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS); |
2764 | 2789 | ||
2765 | flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq); | 2790 | flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); |
2766 | 2791 | ||
2767 | if (TCP_SKB_CB(skb)->sacked) | 2792 | if (TCP_SKB_CB(skb)->sacked) |
2768 | flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); | 2793 | flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); |
@@ -3426,7 +3451,7 @@ queue_and_out: | |||
3426 | } | 3451 | } |
3427 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | 3452 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; |
3428 | if (skb->len) | 3453 | if (skb->len) |
3429 | tcp_event_data_recv(sk, tp, skb); | 3454 | tcp_event_data_recv(sk, skb); |
3430 | if (th->fin) | 3455 | if (th->fin) |
3431 | tcp_fin(skb, sk, th); | 3456 | tcp_fin(skb, sk, th); |
3432 | 3457 | ||
@@ -3443,7 +3468,7 @@ queue_and_out: | |||
3443 | if (tp->rx_opt.num_sacks) | 3468 | if (tp->rx_opt.num_sacks) |
3444 | tcp_sack_remove(tp); | 3469 | tcp_sack_remove(tp); |
3445 | 3470 | ||
3446 | tcp_fast_path_check(sk, tp); | 3471 | tcp_fast_path_check(sk); |
3447 | 3472 | ||
3448 | if (eaten > 0) | 3473 | if (eaten > 0) |
3449 | __kfree_skb(skb); | 3474 | __kfree_skb(skb); |
@@ -3734,7 +3759,7 @@ static int tcp_prune_queue(struct sock *sk) | |||
3734 | NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED); | 3759 | NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED); |
3735 | 3760 | ||
3736 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) | 3761 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) |
3737 | tcp_clamp_window(sk, tp); | 3762 | tcp_clamp_window(sk); |
3738 | else if (tcp_memory_pressure) | 3763 | else if (tcp_memory_pressure) |
3739 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); | 3764 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); |
3740 | 3765 | ||
@@ -3803,8 +3828,10 @@ void tcp_cwnd_application_limited(struct sock *sk) | |||
3803 | tp->snd_cwnd_stamp = tcp_time_stamp; | 3828 | tp->snd_cwnd_stamp = tcp_time_stamp; |
3804 | } | 3829 | } |
3805 | 3830 | ||
3806 | static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp) | 3831 | static int tcp_should_expand_sndbuf(struct sock *sk) |
3807 | { | 3832 | { |
3833 | struct tcp_sock *tp = tcp_sk(sk); | ||
3834 | |||
3808 | /* If the user specified a specific send buffer setting, do | 3835 | /* If the user specified a specific send buffer setting, do |
3809 | * not modify it. | 3836 | * not modify it. |
3810 | */ | 3837 | */ |
@@ -3836,7 +3863,7 @@ static void tcp_new_space(struct sock *sk) | |||
3836 | { | 3863 | { |
3837 | struct tcp_sock *tp = tcp_sk(sk); | 3864 | struct tcp_sock *tp = tcp_sk(sk); |
3838 | 3865 | ||
3839 | if (tcp_should_expand_sndbuf(sk, tp)) { | 3866 | if (tcp_should_expand_sndbuf(sk)) { |
3840 | int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + | 3867 | int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + |
3841 | MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), | 3868 | MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), |
3842 | demanded = max_t(unsigned int, tp->snd_cwnd, | 3869 | demanded = max_t(unsigned int, tp->snd_cwnd, |
@@ -3860,9 +3887,9 @@ static void tcp_check_space(struct sock *sk) | |||
3860 | } | 3887 | } |
3861 | } | 3888 | } |
3862 | 3889 | ||
3863 | static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp) | 3890 | static inline void tcp_data_snd_check(struct sock *sk) |
3864 | { | 3891 | { |
3865 | tcp_push_pending_frames(sk, tp); | 3892 | tcp_push_pending_frames(sk); |
3866 | tcp_check_space(sk); | 3893 | tcp_check_space(sk); |
3867 | } | 3894 | } |
3868 | 3895 | ||
@@ -4196,7 +4223,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4196 | */ | 4223 | */ |
4197 | tcp_ack(sk, skb, 0); | 4224 | tcp_ack(sk, skb, 0); |
4198 | __kfree_skb(skb); | 4225 | __kfree_skb(skb); |
4199 | tcp_data_snd_check(sk, tp); | 4226 | tcp_data_snd_check(sk); |
4200 | return 0; | 4227 | return 0; |
4201 | } else { /* Header too small */ | 4228 | } else { /* Header too small */ |
4202 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 4229 | TCP_INC_STATS_BH(TCP_MIB_INERRS); |
@@ -4267,12 +4294,12 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
4267 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; | 4294 | tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; |
4268 | } | 4295 | } |
4269 | 4296 | ||
4270 | tcp_event_data_recv(sk, tp, skb); | 4297 | tcp_event_data_recv(sk, skb); |
4271 | 4298 | ||
4272 | if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { | 4299 | if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { |
4273 | /* Well, only one small jumplet in fast path... */ | 4300 | /* Well, only one small jumplet in fast path... */ |
4274 | tcp_ack(sk, skb, FLAG_DATA); | 4301 | tcp_ack(sk, skb, FLAG_DATA); |
4275 | tcp_data_snd_check(sk, tp); | 4302 | tcp_data_snd_check(sk); |
4276 | if (!inet_csk_ack_scheduled(sk)) | 4303 | if (!inet_csk_ack_scheduled(sk)) |
4277 | goto no_ack; | 4304 | goto no_ack; |
4278 | } | 4305 | } |
@@ -4355,7 +4382,7 @@ step5: | |||
4355 | /* step 7: process the segment text */ | 4382 | /* step 7: process the segment text */ |
4356 | tcp_data_queue(sk, skb); | 4383 | tcp_data_queue(sk, skb); |
4357 | 4384 | ||
4358 | tcp_data_snd_check(sk, tp); | 4385 | tcp_data_snd_check(sk); |
4359 | tcp_ack_snd_check(sk); | 4386 | tcp_ack_snd_check(sk); |
4360 | return 0; | 4387 | return 0; |
4361 | 4388 | ||
@@ -4672,7 +4699,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4672 | /* Do step6 onward by hand. */ | 4699 | /* Do step6 onward by hand. */ |
4673 | tcp_urg(sk, skb, th); | 4700 | tcp_urg(sk, skb, th); |
4674 | __kfree_skb(skb); | 4701 | __kfree_skb(skb); |
4675 | tcp_data_snd_check(sk, tp); | 4702 | tcp_data_snd_check(sk); |
4676 | return 0; | 4703 | return 0; |
4677 | } | 4704 | } |
4678 | 4705 | ||
@@ -4864,7 +4891,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4864 | 4891 | ||
4865 | /* tcp_data could move socket to TIME-WAIT */ | 4892 | /* tcp_data could move socket to TIME-WAIT */ |
4866 | if (sk->sk_state != TCP_CLOSE) { | 4893 | if (sk->sk_state != TCP_CLOSE) { |
4867 | tcp_data_snd_check(sk, tp); | 4894 | tcp_data_snd_check(sk); |
4868 | tcp_ack_snd_check(sk); | 4895 | tcp_ack_snd_check(sk); |
4869 | } | 4896 | } |
4870 | 4897 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 94d9f0c63682..3a60aea744ae 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -62,12 +62,13 @@ int sysctl_tcp_base_mss __read_mostly = 512; | |||
62 | /* By default, RFC2861 behavior. */ | 62 | /* By default, RFC2861 behavior. */ |
63 | int sysctl_tcp_slow_start_after_idle __read_mostly = 1; | 63 | int sysctl_tcp_slow_start_after_idle __read_mostly = 1; |
64 | 64 | ||
65 | static void update_send_head(struct sock *sk, struct tcp_sock *tp, | 65 | static void update_send_head(struct sock *sk, struct sk_buff *skb) |
66 | struct sk_buff *skb) | ||
67 | { | 66 | { |
67 | struct tcp_sock *tp = tcp_sk(sk); | ||
68 | |||
68 | tcp_advance_send_head(sk, skb); | 69 | tcp_advance_send_head(sk, skb); |
69 | tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; | 70 | tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; |
70 | tcp_packets_out_inc(sk, tp, skb); | 71 | tcp_packets_out_inc(sk, skb); |
71 | } | 72 | } |
72 | 73 | ||
73 | /* SND.NXT, if window was not shrunk. | 74 | /* SND.NXT, if window was not shrunk. |
@@ -76,8 +77,10 @@ static void update_send_head(struct sock *sk, struct tcp_sock *tp, | |||
76 | * Anything in between SND.UNA...SND.UNA+SND.WND also can be already | 77 | * Anything in between SND.UNA...SND.UNA+SND.WND also can be already |
77 | * invalid. OK, let's make this for now: | 78 | * invalid. OK, let's make this for now: |
78 | */ | 79 | */ |
79 | static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp) | 80 | static inline __u32 tcp_acceptable_seq(struct sock *sk) |
80 | { | 81 | { |
82 | struct tcp_sock *tp = tcp_sk(sk); | ||
83 | |||
81 | if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt)) | 84 | if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt)) |
82 | return tp->snd_nxt; | 85 | return tp->snd_nxt; |
83 | else | 86 | else |
@@ -516,7 +519,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
516 | md5 ? &md5_hash_location : | 519 | md5 ? &md5_hash_location : |
517 | #endif | 520 | #endif |
518 | NULL); | 521 | NULL); |
519 | TCP_ECN_send(sk, tp, skb, tcp_header_size); | 522 | TCP_ECN_send(sk, skb, tcp_header_size); |
520 | } | 523 | } |
521 | 524 | ||
522 | #ifdef CONFIG_TCP_MD5SIG | 525 | #ifdef CONFIG_TCP_MD5SIG |
@@ -927,8 +930,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
927 | 930 | ||
928 | /* Congestion window validation. (RFC2861) */ | 931 | /* Congestion window validation. (RFC2861) */ |
929 | 932 | ||
930 | static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) | 933 | static void tcp_cwnd_validate(struct sock *sk) |
931 | { | 934 | { |
935 | struct tcp_sock *tp = tcp_sk(sk); | ||
932 | __u32 packets_out = tp->packets_out; | 936 | __u32 packets_out = tp->packets_out; |
933 | 937 | ||
934 | if (packets_out >= tp->snd_cwnd) { | 938 | if (packets_out >= tp->snd_cwnd) { |
@@ -1076,8 +1080,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, | |||
1076 | return cwnd_quota; | 1080 | return cwnd_quota; |
1077 | } | 1081 | } |
1078 | 1082 | ||
1079 | int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp) | 1083 | int tcp_may_send_now(struct sock *sk) |
1080 | { | 1084 | { |
1085 | struct tcp_sock *tp = tcp_sk(sk); | ||
1081 | struct sk_buff *skb = tcp_send_head(sk); | 1086 | struct sk_buff *skb = tcp_send_head(sk); |
1082 | 1087 | ||
1083 | return (skb && | 1088 | return (skb && |
@@ -1144,8 +1149,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, | |||
1144 | * | 1149 | * |
1145 | * This algorithm is from John Heffner. | 1150 | * This algorithm is from John Heffner. |
1146 | */ | 1151 | */ |
1147 | static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) | 1152 | static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) |
1148 | { | 1153 | { |
1154 | struct tcp_sock *tp = tcp_sk(sk); | ||
1149 | const struct inet_connection_sock *icsk = inet_csk(sk); | 1155 | const struct inet_connection_sock *icsk = inet_csk(sk); |
1150 | u32 send_win, cong_win, limit, in_flight; | 1156 | u32 send_win, cong_win, limit, in_flight; |
1151 | 1157 | ||
@@ -1324,7 +1330,7 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1324 | /* Decrement cwnd here because we are sending | 1330 | /* Decrement cwnd here because we are sending |
1325 | * effectively two packets. */ | 1331 | * effectively two packets. */ |
1326 | tp->snd_cwnd--; | 1332 | tp->snd_cwnd--; |
1327 | update_send_head(sk, tp, nskb); | 1333 | update_send_head(sk, nskb); |
1328 | 1334 | ||
1329 | icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); | 1335 | icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); |
1330 | tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; | 1336 | tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; |
@@ -1387,7 +1393,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) | |||
1387 | nonagle : TCP_NAGLE_PUSH)))) | 1393 | nonagle : TCP_NAGLE_PUSH)))) |
1388 | break; | 1394 | break; |
1389 | } else { | 1395 | } else { |
1390 | if (tcp_tso_should_defer(sk, tp, skb)) | 1396 | if (tcp_tso_should_defer(sk, skb)) |
1391 | break; | 1397 | break; |
1392 | } | 1398 | } |
1393 | 1399 | ||
@@ -1416,14 +1422,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) | |||
1416 | /* Advance the send_head. This one is sent out. | 1422 | /* Advance the send_head. This one is sent out. |
1417 | * This call will increment packets_out. | 1423 | * This call will increment packets_out. |
1418 | */ | 1424 | */ |
1419 | update_send_head(sk, tp, skb); | 1425 | update_send_head(sk, skb); |
1420 | 1426 | ||
1421 | tcp_minshall_update(tp, mss_now, skb); | 1427 | tcp_minshall_update(tp, mss_now, skb); |
1422 | sent_pkts++; | 1428 | sent_pkts++; |
1423 | } | 1429 | } |
1424 | 1430 | ||
1425 | if (likely(sent_pkts)) { | 1431 | if (likely(sent_pkts)) { |
1426 | tcp_cwnd_validate(sk, tp); | 1432 | tcp_cwnd_validate(sk); |
1427 | return 0; | 1433 | return 0; |
1428 | } | 1434 | } |
1429 | return !tp->packets_out && tcp_send_head(sk); | 1435 | return !tp->packets_out && tcp_send_head(sk); |
@@ -1433,14 +1439,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) | |||
1433 | * TCP_CORK or attempt at coalescing tiny packets. | 1439 | * TCP_CORK or attempt at coalescing tiny packets. |
1434 | * The socket must be locked by the caller. | 1440 | * The socket must be locked by the caller. |
1435 | */ | 1441 | */ |
1436 | void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, | 1442 | void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, |
1437 | unsigned int cur_mss, int nonagle) | 1443 | int nonagle) |
1438 | { | 1444 | { |
1439 | struct sk_buff *skb = tcp_send_head(sk); | 1445 | struct sk_buff *skb = tcp_send_head(sk); |
1440 | 1446 | ||
1441 | if (skb) { | 1447 | if (skb) { |
1442 | if (tcp_write_xmit(sk, cur_mss, nonagle)) | 1448 | if (tcp_write_xmit(sk, cur_mss, nonagle)) |
1443 | tcp_check_probe_timer(sk, tp); | 1449 | tcp_check_probe_timer(sk); |
1444 | } | 1450 | } |
1445 | } | 1451 | } |
1446 | 1452 | ||
@@ -1484,8 +1490,8 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) | |||
1484 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | 1490 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
1485 | 1491 | ||
1486 | if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) { | 1492 | if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) { |
1487 | update_send_head(sk, tp, skb); | 1493 | update_send_head(sk, skb); |
1488 | tcp_cwnd_validate(sk, tp); | 1494 | tcp_cwnd_validate(sk); |
1489 | return; | 1495 | return; |
1490 | } | 1496 | } |
1491 | } | 1497 | } |
@@ -1933,7 +1939,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
1933 | * segments to send. | 1939 | * segments to send. |
1934 | */ | 1940 | */ |
1935 | 1941 | ||
1936 | if (tcp_may_send_now(sk, tp)) | 1942 | if (tcp_may_send_now(sk)) |
1937 | return; | 1943 | return; |
1938 | 1944 | ||
1939 | if (tp->forward_skb_hint) { | 1945 | if (tp->forward_skb_hint) { |
@@ -2023,7 +2029,7 @@ void tcp_send_fin(struct sock *sk) | |||
2023 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; | 2029 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; |
2024 | tcp_queue_skb(sk, skb); | 2030 | tcp_queue_skb(sk, skb); |
2025 | } | 2031 | } |
2026 | __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF); | 2032 | __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); |
2027 | } | 2033 | } |
2028 | 2034 | ||
2029 | /* We get here when a process closes a file descriptor (either due to | 2035 | /* We get here when a process closes a file descriptor (either due to |
@@ -2033,7 +2039,6 @@ void tcp_send_fin(struct sock *sk) | |||
2033 | */ | 2039 | */ |
2034 | void tcp_send_active_reset(struct sock *sk, gfp_t priority) | 2040 | void tcp_send_active_reset(struct sock *sk, gfp_t priority) |
2035 | { | 2041 | { |
2036 | struct tcp_sock *tp = tcp_sk(sk); | ||
2037 | struct sk_buff *skb; | 2042 | struct sk_buff *skb; |
2038 | 2043 | ||
2039 | /* NOTE: No TCP options attached and we never retransmit this. */ | 2044 | /* NOTE: No TCP options attached and we never retransmit this. */ |
@@ -2053,7 +2058,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) | |||
2053 | skb_shinfo(skb)->gso_type = 0; | 2058 | skb_shinfo(skb)->gso_type = 0; |
2054 | 2059 | ||
2055 | /* Send it off. */ | 2060 | /* Send it off. */ |
2056 | TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); | 2061 | TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk); |
2057 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; | 2062 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; |
2058 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | 2063 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
2059 | if (tcp_transmit_skb(sk, skb, 0, priority)) | 2064 | if (tcp_transmit_skb(sk, skb, 0, priority)) |
@@ -2271,7 +2276,7 @@ int tcp_connect(struct sock *sk) | |||
2271 | skb_reserve(buff, MAX_TCP_HEADER); | 2276 | skb_reserve(buff, MAX_TCP_HEADER); |
2272 | 2277 | ||
2273 | TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; | 2278 | TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; |
2274 | TCP_ECN_send_syn(sk, tp, buff); | 2279 | TCP_ECN_send_syn(sk, buff); |
2275 | TCP_SKB_CB(buff)->sacked = 0; | 2280 | TCP_SKB_CB(buff)->sacked = 0; |
2276 | skb_shinfo(buff)->gso_segs = 1; | 2281 | skb_shinfo(buff)->gso_segs = 1; |
2277 | skb_shinfo(buff)->gso_size = 0; | 2282 | skb_shinfo(buff)->gso_size = 0; |
@@ -2363,7 +2368,6 @@ void tcp_send_ack(struct sock *sk) | |||
2363 | { | 2368 | { |
2364 | /* If we have been reset, we may not send again. */ | 2369 | /* If we have been reset, we may not send again. */ |
2365 | if (sk->sk_state != TCP_CLOSE) { | 2370 | if (sk->sk_state != TCP_CLOSE) { |
2366 | struct tcp_sock *tp = tcp_sk(sk); | ||
2367 | struct sk_buff *buff; | 2371 | struct sk_buff *buff; |
2368 | 2372 | ||
2369 | /* We are not putting this on the write queue, so | 2373 | /* We are not putting this on the write queue, so |
@@ -2389,7 +2393,7 @@ void tcp_send_ack(struct sock *sk) | |||
2389 | skb_shinfo(buff)->gso_type = 0; | 2393 | skb_shinfo(buff)->gso_type = 0; |
2390 | 2394 | ||
2391 | /* Send it off, this clears delayed acks for us. */ | 2395 | /* Send it off, this clears delayed acks for us. */ |
2392 | TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp); | 2396 | TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk); |
2393 | TCP_SKB_CB(buff)->when = tcp_time_stamp; | 2397 | TCP_SKB_CB(buff)->when = tcp_time_stamp; |
2394 | tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); | 2398 | tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); |
2395 | } | 2399 | } |
@@ -2467,7 +2471,7 @@ int tcp_write_wakeup(struct sock *sk) | |||
2467 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | 2471 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
2468 | err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); | 2472 | err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); |
2469 | if (!err) { | 2473 | if (!err) { |
2470 | update_send_head(sk, tp, skb); | 2474 | update_send_head(sk, skb); |
2471 | } | 2475 | } |
2472 | return err; | 2476 | return err; |
2473 | } else { | 2477 | } else { |