author    Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>    2008-12-04 00:24:48 -0500
committer David S. Miller <davem@davemloft.net>        2008-12-04 00:24:48 -0500
commit    f8269a495a1924f8b023532dd3e77423432db810
tree      73b5f7b9e059a79b890c53572e0e048a2078577d /net
parent    5176da7e5318669220e4d2fa856223054a3efc9f
tcp: make urg+gso work for real this time
I should have noticed this earlier... :-) The previous solution to URG+GSO/TSO will cause SACK-block-driven tcp_fragment to do zig-zag patterns, or even worse, a steep downward slope into packet counting, because each skb pcount would be truncated to a pcount of 2 and then the following fragments of the later portion would restore the window again.

Basically this reverts "tcp: Do not use TSO/GSO when there is urgent data" (33cf71cee1). It also removes some unnecessary code from tcp_current_mss that didn't work as intended either (could be that something was changed down the road, or it might have been broken since the dawn of time), because it only works once urg is already written, while this bug shows up starting from ~64k before the urg point.

The retransmissions are already split into mss-sized chunks, so only the new-data sending paths need splitting in case they have a segment otherwise suitable for gso/tso. The actual check could be made more narrow, but since this is already late in -rc, I'll postpone thinking about the more fine-grained things.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
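In concrete terms: rather than disabling GSO/TSO everywhere whenever urgent data is pending, the patch keeps segmentation decisions urg-agnostic and caps the per-send limit at one MSS only in the two new-data send paths (tcp_write_xmit and tcp_push_one), so such segments get split on send. A minimal sketch of the resulting pattern, assuming tcp_urg_mode() keeps the definition the reverted commit gave it in include/net/tcp.h:

    /* Urgent mode: snd_up differs from snd_una, i.e. urgent data has
     * been queued and is not yet fully acknowledged.
     */
    static inline int tcp_urg_mode(const struct tcp_sock *tp)
    {
    	return tp->snd_una != tp->snd_up;
    }

    /* New-data send path: while in urg mode, send at most one MSS so
     * the segment is split instead of leaving as a multi-segment
     * GSO/TSO super-skb; otherwise tcp_mss_split_point() picks the
     * largest chunk the cwnd quota and window allow.
     */
    limit = mss_now;
    if (tso_segs > 1 && !tcp_urg_mode(tp))
    	limit = tcp_mss_split_point(sk, skb, mss_now, cwnd_quota);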
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_output.c  22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 85b07eba1879..fe3b4bdfd251 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -722,8 +722,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
-	if (skb->len <= mss_now || !sk_can_gso(sk) ||
-	    tcp_urg_mode(tcp_sk(sk))) {
+	if (skb->len <= mss_now || !sk_can_gso(sk)) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
@@ -1029,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
 
 /* Compute the current effective MSS, taking SACKs and IP options,
  * and even PMTU discovery events into account.
- *
- * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up
- * cannot be large. However, taking into account rare use of URG, this
- * is not a big flaw.
  */
 unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 {
@@ -1047,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
 	mss_now = tp->mss_cache;
 
-	if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp))
+	if (large_allowed && sk_can_gso(sk))
 		doing_tso = 1;
 
 	if (dst) {
@@ -1164,9 +1159,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
 {
 	int tso_segs = tcp_skb_pcount(skb);
 
-	if (!tso_segs ||
-	    (tso_segs > 1 && (tcp_skb_mss(skb) != mss_now ||
-			      tcp_urg_mode(tcp_sk(sk))))) {
+	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
 		tcp_set_skb_tso_segs(sk, skb, mss_now);
 		tso_segs = tcp_skb_pcount(skb);
 	}
@@ -1519,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk)
  * send_head. This happens as incoming acks open up the remote
  * window for us.
  *
+ * LARGESEND note: !tcp_urg_mode is overkill, only frames between
+ * snd_up-64k-mss .. snd_up cannot be large. However, taking into
+ * account rare use of URG, this is not a big flaw.
+ *
  * Returns 1, if no segments are in flight and we have queued segments, but
  * cannot send anything now because of SWS or another problem.
  */
@@ -1570,7 +1567,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 		}
 
 		limit = mss_now;
-		if (tso_segs > 1)
+		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
 						    cwnd_quota);
 
@@ -1619,6 +1616,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
  */
 void tcp_push_one(struct sock *sk, unsigned int mss_now)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
 	unsigned int tso_segs, cwnd_quota;
 
@@ -1633,7 +1631,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 	BUG_ON(!tso_segs);
 
 	limit = mss_now;
-	if (tso_segs > 1)
+	if (tso_segs > 1 && !tcp_urg_mode(tp))
 		limit = tcp_mss_split_point(sk, skb, mss_now,
 					    cwnd_quota);
 