-rw-r--r--	include/net/tcp.h	|  26
-rw-r--r--	net/ipv4/tcp_output.c	|  79
2 files changed, 52 insertions, 53 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4888f9d3f56b..f32e7aed2c75 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -848,7 +848,6 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 
 /* tcp_output.c */
 
-extern int tcp_write_xmit(struct sock *, int nonagle);
 extern void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb);
 extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
				       unsigned cur_mss, int nonagle);
@@ -868,6 +867,9 @@ extern void tcp_push_one(struct sock *, unsigned mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
 
+/* tcp_input.c */
+extern void tcp_cwnd_application_limited(struct sock *sk);
+
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
 extern void tcp_clear_xmit_timers(struct sock *);
@@ -1234,28 +1236,6 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
 	tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
-extern void tcp_cwnd_application_limited(struct sock *sk);
-
-/* Congestion window validation. (RFC2861) */
-
-static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
-{
-	__u32 packets_out = tp->packets_out;
-
-	if (packets_out >= tp->snd_cwnd) {
-		/* Network is feed fully. */
-		tp->snd_cwnd_used = 0;
-		tp->snd_cwnd_stamp = tcp_time_stamp;
-	} else {
-		/* Network starves. */
-		if (tp->packets_out > tp->snd_cwnd_used)
-			tp->snd_cwnd_used = tp->packets_out;
-
-		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
-			tcp_cwnd_application_limited(sk);
-	}
-}
-
 /* Set slow start threshould and cwnd not falling to slow start */
 static inline void __tcp_enter_cwr(struct tcp_sock *tp)
 {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5e63ed09658d..a6375ca2a59e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -511,35 +511,6 @@ static inline int tcp_skb_is_last(const struct sock *sk,
 	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
 }
 
-/* Push out any pending frames which were held back due to
- * TCP_CORK or attempt at coalescing tiny packets.
- * The socket must be locked by the caller.
- */
-void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-			       unsigned cur_mss, int nonagle)
-{
-	struct sk_buff *skb = sk->sk_send_head;
-
-	if (skb) {
-		if (!tcp_skb_is_last(sk, skb))
-			nonagle = TCP_NAGLE_PUSH;
-		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
-		    tcp_write_xmit(sk, nonagle))
-			tcp_check_probe_timer(sk, tp);
-	}
-	tcp_cwnd_validate(sk, tp);
-}
-
-void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) ||
-	    tcp_packets_in_flight(tp) >= tp->snd_cwnd ||
-	    tcp_write_xmit(sk, tp->nonagle))
-		tcp_check_probe_timer(sk, tp);
-}
-
 int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
 {
 	struct sk_buff *skb = sk->sk_send_head;
@@ -841,6 +812,26 @@ unsigned int tcp_current_mss(struct sock *sk, int large)
 	return mss_now;
 }
 
+/* Congestion window validation. (RFC2861) */
+
+static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
+{
+	__u32 packets_out = tp->packets_out;
+
+	if (packets_out >= tp->snd_cwnd) {
+		/* Network is feed fully. */
+		tp->snd_cwnd_used = 0;
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+	} else {
+		/* Network starves. */
+		if (tp->packets_out > tp->snd_cwnd_used)
+			tp->snd_cwnd_used = tp->packets_out;
+
+		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
+			tcp_cwnd_application_limited(sk);
+	}
+}
+
 /* This routine writes packets to the network.  It advances the
  * send_head.  This happens as incoming acks open up the remote
  * window for us.
@@ -848,7 +839,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large)
  * Returns 1, if no segments are in flight and we have queued segments, but
  * cannot send anything now because of SWS or another problem.
  */
-int tcp_write_xmit(struct sock *sk, int nonagle)
+static int tcp_write_xmit(struct sock *sk, int nonagle)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int mss_now;
@@ -901,6 +892,34 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
 	return 0;
 }
 
+/* Push out any pending frames which were held back due to
+ * TCP_CORK or attempt at coalescing tiny packets.
+ * The socket must be locked by the caller.
+ */
+void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
+			       unsigned cur_mss, int nonagle)
+{
+	struct sk_buff *skb = sk->sk_send_head;
+
+	if (skb) {
+		if (!tcp_skb_is_last(sk, skb))
+			nonagle = TCP_NAGLE_PUSH;
+		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
+		    tcp_write_xmit(sk, nonagle))
+			tcp_check_probe_timer(sk, tp);
+	}
+}
+
+void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) ||
+	    tcp_packets_in_flight(tp) >= tp->snd_cwnd ||
+	    tcp_write_xmit(sk, tp->nonagle))
+		tcp_check_probe_timer(sk, tp);
+}
+
 /* This function returns the amount that we can raise the
  * usable window based on the following constraints
  *
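
For readers skimming the diff, the RFC 2861 logic being relocated into tcp_output.c is small enough to restate outside the kernel. The following standalone sketch mirrors the check that tcp_cwnd_validate() performs in the patch above; struct cwnd_state, cwnd_validate(), cwnd_application_limited(), and the "now" tick argument are illustrative stand-ins for the kernel's struct tcp_sock, tcp_time_stamp, and tcp_cwnd_application_limited(), not kernel definitions.

/*
 * Standalone sketch of the RFC 2861 congestion-window validation check.
 * All names here are hypothetical user-space stand-ins for kernel state.
 */
#include <stdint.h>
#include <stdio.h>

struct cwnd_state {
	uint32_t packets_out;    /* segments currently in flight */
	uint32_t snd_cwnd;       /* congestion window, in segments */
	uint32_t snd_cwnd_used;  /* high-water mark of window actually used */
	uint32_t snd_cwnd_stamp; /* tick when the window was last fully used */
	uint32_t rto;            /* retransmission timeout, in ticks */
};

/* Stand-in for tcp_cwnd_application_limited(): RFC 2861 decays the
 * window here because the application is not filling it. */
static void cwnd_application_limited(struct cwnd_state *tp)
{
	printf("application limited: decay cwnd toward actual usage\n");
}

static void cwnd_validate(struct cwnd_state *tp, uint32_t now)
{
	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully: reset usage tracking and the clock. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = now;
	} else {
		/* Window under-used: track the high-water mark, and once a
		 * full RTO passes without filling the window, decay it. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((int32_t)(now - tp->snd_cwnd_stamp) >= (int32_t)tp->rto)
			cwnd_application_limited(tp);
	}
}

int main(void)
{
	struct cwnd_state tp = { .packets_out = 2, .snd_cwnd = 10,
				 .snd_cwnd_used = 0, .snd_cwnd_stamp = 0,
				 .rto = 200 };

	cwnd_validate(&tp, 250);	/* window under-used for longer than rto */
	return 0;
}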