author     David S. Miller <davem@davemloft.net>  2005-07-05 18:19:23 -0400
committer  David S. Miller <davem@davemloft.net>  2005-07-05 18:19:23 -0400
commit     a2e2a59c93cc8ba39caa9011c2573f429e40ccd9 (patch)
tree       6eae3953758b1dd84fa42b0545dd5dfd95c65b19
parent     92df7b518dcb113de8bc2494e3cd275ad887f12b (diff)
[TCP]: Fix redundant calculations of tcp_current_mss()
tcp_write_xmit() uses tcp_current_mss(), but some of its callers,
namely __tcp_push_pending_frames(), already have this value
available.
While we're here, fix the "cur_mss" argument to be "unsigned int"
instead of plain "unsigned".
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/tcp.h       |  2
-rw-r--r--  net/ipv4/tcp_output.c   | 16
2 files changed, 5 insertions, 13 deletions
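
To make the calling pattern concrete, the following is a small stand-alone C sketch, not kernel code: the struct, the fake_-prefixed names, and the values are invented for illustration only. It models what the patch does — the transmit routine takes the MSS as a parameter, a caller that already holds it (as __tcp_push_pending_frames() does with cur_mss) forwards it, and a caller that does not computes it once at the call site rather than having the callee recompute it.

#include <stdio.h>

struct fake_sock { unsigned int mss_cache; };

/* Stand-in for tcp_current_mss(): treat this as the computation
 * the patch avoids repeating inside the transmit path. */
static unsigned int fake_current_mss(const struct fake_sock *sk)
{
	return sk->mss_cache;
}

/* After the patch: the write path takes mss_now from its caller
 * instead of recomputing it internally. */
static int fake_write_xmit(struct fake_sock *sk, unsigned int mss_now, int nonagle)
{
	(void)sk;
	printf("xmit: mss=%u nonagle=%d\n", mss_now, nonagle);
	return 0;
}

/* A caller in the same position as __tcp_push_pending_frames():
 * it already holds cur_mss, so it simply forwards it. */
static void fake_push_pending_frames(struct fake_sock *sk,
				     unsigned int cur_mss, int nonagle)
{
	fake_write_xmit(sk, cur_mss, nonagle);
}

int main(void)
{
	struct fake_sock sk = { .mss_cache = 1460 };

	/* A caller without an MSS at hand computes it once at the call site. */
	fake_write_xmit(&sk, fake_current_mss(&sk), 0);

	/* A caller that already has the value passes it straight through. */
	fake_push_pending_frames(&sk, 1460, 1);
	return 0;
}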
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f32e7aed2c75..9416236cc395 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -850,7 +850,7 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 
 extern void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb);
 extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-				       unsigned cur_mss, int nonagle);
+				       unsigned int cur_mss, int nonagle);
 extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2a8409c3af1a..e292e11c7319 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -839,11 +839,10 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
  * Returns 1, if no segments are in flight and we have queued segments, but
  * cannot send anything now because of SWS or another problem.
  */
-static int tcp_write_xmit(struct sock *sk, int nonagle)
+static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	unsigned int mss_now;
 	int sent_pkts;
 
 	/* If we are closed, the bytes will have to remain here.
@@ -853,13 +852,6 @@ static int tcp_write_xmit(struct sock *sk, int nonagle)
 	if (unlikely(sk->sk_state == TCP_CLOSE))
 		return 0;
 
-
-	/* Account for SACKS, we may need to fragment due to this.
-	 * It is just like the real MSS changing on us midstream.
-	 * We also handle things correctly when the user adds some
-	 * IP options mid-stream. Silly to do, but cover it.
-	 */
-	mss_now = tcp_current_mss(sk, 1);
 	sent_pkts = 0;
 	while ((skb = sk->sk_send_head) &&
 	       tcp_snd_test(sk, skb, mss_now,
@@ -897,7 +889,7 @@ static int tcp_write_xmit(struct sock *sk, int nonagle)
  * The socket must be locked by the caller.
  */
 void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-			       unsigned cur_mss, int nonagle)
+			       unsigned int cur_mss, int nonagle)
 {
 	struct sk_buff *skb = sk->sk_send_head;
 
@@ -905,7 +897,7 @@ void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
 		if (!tcp_skb_is_last(sk, skb))
 			nonagle = TCP_NAGLE_PUSH;
 		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
-		    tcp_write_xmit(sk, nonagle))
+		    tcp_write_xmit(sk, cur_mss, nonagle))
 			tcp_check_probe_timer(sk, tp);
 	}
 }
@@ -916,7 +908,7 @@ void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)
 
 	if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) ||
 	    tcp_packets_in_flight(tp) >= tp->snd_cwnd ||
-	    tcp_write_xmit(sk, tp->nonagle))
+	    tcp_write_xmit(sk, tcp_current_mss(sk, 1), tp->nonagle))
 		tcp_check_probe_timer(sk, tp);
 }
 