Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c  39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3ed6fc15815b..566045e58437 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -861,7 +861,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	u16 flags;
 
 	/* All of a TSO frame must be composed of paged data. */
-	BUG_ON(skb->len != skb->data_len);
+	if (skb->len != skb->data_len)
+		return tcp_fragment(sk, skb, len, mss_now);
 
 	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
 	if (unlikely(buff == NULL))
@@ -974,6 +975,8 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 
 	sent_pkts = 0;
 	while ((skb = sk->sk_send_head)) {
+		unsigned int limit;
+
 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
 		BUG_ON(!tso_segs);
 
@@ -994,9 +997,10 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 			break;
 		}
 
+		limit = mss_now;
 		if (tso_segs > 1) {
-			u32 limit = tcp_window_allows(tp, skb,
-						      mss_now, cwnd_quota);
+			limit = tcp_window_allows(tp, skb,
+						  mss_now, cwnd_quota);
 
 			if (skb->len < limit) {
 				unsigned int trim = skb->len % mss_now;
@@ -1004,15 +1008,12 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 				if (trim)
 					limit = skb->len - trim;
 			}
-			if (skb->len > limit) {
-				if (tso_fragment(sk, skb, limit, mss_now))
-					break;
-			}
-		} else if (unlikely(skb->len > mss_now)) {
-			if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
-				break;
 		}
 
+		if (skb->len > limit &&
+		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
+			break;
+
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
 		if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
@@ -1064,11 +1065,14 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
 
 	if (likely(cwnd_quota)) {
+		unsigned int limit;
+
 		BUG_ON(!tso_segs);
 
+		limit = mss_now;
 		if (tso_segs > 1) {
-			u32 limit = tcp_window_allows(tp, skb,
-						      mss_now, cwnd_quota);
+			limit = tcp_window_allows(tp, skb,
+						  mss_now, cwnd_quota);
 
 			if (skb->len < limit) {
 				unsigned int trim = skb->len % mss_now;
@@ -1076,15 +1080,12 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 				if (trim)
 					limit = skb->len - trim;
 			}
-			if (skb->len > limit) {
-				if (unlikely(tso_fragment(sk, skb, limit, mss_now)))
-					return;
-			}
-		} else if (unlikely(skb->len > mss_now)) {
-			if (unlikely(tcp_fragment(sk, skb, mss_now, mss_now)))
-				return;
 		}
 
+		if (skb->len > limit &&
+		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
+			return;
+
 		/* Send it out now. */
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
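
In summary, the patch drops the BUG_ON() in tso_fragment() in favour of falling back to tcp_fragment() when the skb is not composed entirely of paged data, and collapses the duplicated TSO/non-TSO fragmenting branches in tcp_write_xmit() and tcp_push_one() into a single test against a per-skb limit that starts at mss_now and is raised by tcp_window_allows() for multi-segment skbs. A minimal sketch of the resulting send-path logic, using only identifiers that appear in the hunks above and eliding the surrounding loop and error handling, might look like this:

	/* Sketch only: identifiers are taken from the patch above;
	 * everything around them is elided.
	 */
	unsigned int limit = mss_now;

	if (tso_segs > 1) {
		limit = tcp_window_allows(tp, skb, mss_now, cwnd_quota);

		if (skb->len < limit) {
			unsigned int trim = skb->len % mss_now;

			if (trim)
				limit = skb->len - trim;
		}
	}

	/* One fragmenting call now covers both the TSO and the plain-MSS
	 * case, since tso_fragment() falls back to tcp_fragment() when the
	 * skb is not entirely paged data.
	 */
	if (skb->len > limit &&
	    unlikely(tso_fragment(sk, skb, limit, mss_now)))
		break;	/* tcp_push_one() returns instead of breaking */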