author    David S. Miller <davem@davemloft.net>  2005-07-05 18:20:09 -0400
committer David S. Miller <davem@davemloft.net>  2005-07-05 18:20:09 -0400
commit    aa93466bdfd901b926e033801f0b82b3eaa67be2 (patch)
tree      5ee3bec452e795d48f380aee4494e06e5aba038e
parent    7f4dd0a9438c73cbb1c240ece31390cf2c57294e (diff)
[TCP]: Eliminate redundant computations in tcp_write_xmit().
tcp_snd_test() is run for every packet output by a single call to tcp_write_xmit(), but this is not necessary. For one, the congestion window space needs to be calculated only once, then used throughout the loop.

This cleanup also makes experimenting with different TSO packetization schemes much easier.

Signed-off-by: David S. Miller <davem@davemloft.net>
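As a rough illustration of the pattern the patch applies, here is a minimal userland sketch: the congestion-window quota is derived once before the loop and then spent per packet, instead of re-running the full send test on every iteration. The names cwnd_quota(), CWND, IN_FLIGHT and the queued segment counts are hypothetical stand-ins for this sketch, not the kernel functions themselves.

/* Hypothetical model, not kernel code: compute the loop-invariant
 * congestion-window quota once, then charge it per packet sent. */
#include <stdio.h>

#define CWND		8	/* congestion window, in segments */
#define IN_FLIGHT	3	/* segments already unacknowledged */

static unsigned int cwnd_quota(void)
{
	/* analogous to deriving the window space a single time */
	return CWND > IN_FLIGHT ? CWND - IN_FLIGHT : 0;
}

int main(void)
{
	unsigned int segs[] = { 2, 1, 3, 2 };	/* pcount of queued packets */
	unsigned int quota = cwnd_quota();	/* computed once, not per packet */
	unsigned int sent = 0;

	for (unsigned int i = 0;
	     i < sizeof(segs) / sizeof(segs[0]) && quota >= segs[i]; i++) {
		quota -= segs[i];		/* spend the quota as we send */
		sent++;
	}
	printf("sent %u packets, quota left %u\n", sent, quota);
	return 0;
}

With the values above, two packets fit (2 + 1 segments) and the third (3 segments) exceeds the remaining quota of 2, so the loop stops without recomputing anything.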
-rw-r--r--	net/ipv4/tcp_output.c	40
1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8327e5e86d15..0a4cd24b6578 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -887,6 +887,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
+	unsigned int tso_segs, cwnd_quota;
 	int sent_pkts;
 
 	/* If we are closed, the bytes will have to remain here.
@@ -896,19 +897,31 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 	if (unlikely(sk->sk_state == TCP_CLOSE))
 		return 0;
 
+	skb = sk->sk_send_head;
+	if (unlikely(!skb))
+		return 0;
+
+	tso_segs = tcp_init_tso_segs(sk, skb);
+	cwnd_quota = tcp_cwnd_test(tp, skb);
 	sent_pkts = 0;
-	while ((skb = sk->sk_send_head) &&
-	       tcp_snd_test(sk, skb, mss_now,
-			    tcp_skb_is_last(sk, skb) ? nonagle :
-			    TCP_NAGLE_PUSH)) {
-		if (skb->len > mss_now) {
-			if (tcp_fragment(sk, skb, mss_now))
+
+	while (cwnd_quota >= tso_segs) {
+		if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
+					     (tcp_skb_is_last(sk, skb) ?
+					      nonagle : TCP_NAGLE_PUSH))))
+			break;
+
+		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+			break;
+
+		if (unlikely(skb->len > mss_now)) {
+			if (unlikely(tcp_fragment(sk, skb, mss_now)))
 				break;
 		}
 
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		tcp_tso_set_push(skb);
-		if (tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)))
+		if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
 			break;
 
 		/* Advance the send_head. This one is sent out.
@@ -917,10 +930,19 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 		update_send_head(sk, tp, skb);
 
 		tcp_minshall_update(tp, mss_now, skb);
-		sent_pkts = 1;
+		sent_pkts++;
+
+		/* Do not optimize this to use tso_segs. If we chopped up
+		 * the packet above, tso_segs will no longer be valid.
+		 */
+		cwnd_quota -= tcp_skb_pcount(skb);
+		skb = sk->sk_send_head;
+		if (!skb)
+			break;
+		tso_segs = tcp_init_tso_segs(sk, skb);
 	}
 
-	if (sent_pkts) {
+	if (likely(sent_pkts)) {
 		tcp_cwnd_validate(sk, tp);
 		return 0;
 	}
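The in-loop comment above is the subtle point: tso_segs is cached at the top of each iteration, but tcp_fragment() may have split the packet, so the quota must be charged from the packet's current segment count via tcp_skb_pcount(). A minimal sketch of why the cached value would be wrong; struct pkt and pkt_pcount() are hypothetical stand-ins for this illustration, not kernel code.

/* Hypothetical model: a packet split after its segment count was
 * cached must be charged at its current count, not the cached one. */
#include <stdio.h>

struct pkt { unsigned int pcount; };	/* stand-in for the skb segment count */

static unsigned int pkt_pcount(const struct pkt *p)
{
	return p->pcount;		/* analogous to tcp_skb_pcount() */
}

int main(void)
{
	struct pkt p = { .pcount = 4 };		/* 4 TSO segments at loop top */
	unsigned int cwnd_quota = 10;
	unsigned int cached_segs = pkt_pcount(&p);

	p.pcount = 1;			/* a tcp_fragment()-style split: only
					 * one segment is actually sent now */

	cwnd_quota -= pkt_pcount(&p);	/* correct: charge the current count, 1 */
	/* cwnd_quota -= cached_segs;	   wrong: would charge the stale 4 */

	printf("quota left: %u (cached count was %u)\n",
	       cwnd_quota, cached_segs);
	return 0;
}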