author     Petr Tesarik <ptesarik@suse.cz>        2008-11-21 19:42:58 -0500
committer  David S. Miller <davem@davemloft.net>  2008-11-21 19:42:58 -0500
commit     33cf71cee14743185305c61625c4544885055733 (patch)
tree       d05c9fb2fd12d8eede22c261c2db57d30f96a73a /net/ipv4/tcp_output.c
parent     38ae07e44bb2dc86770555a1acafcb937ec74478 (diff)
tcp: Do not use TSO/GSO when there is urgent data
This patch fixes http://bugzilla.kernel.org/show_bug.cgi?id=12014
Since most (if not all) implementations of TSO and even the in-kernel
software GSO do not update the urgent pointer when splitting a large
segment, it is necessary to turn off TSO/GSO for all outgoing traffic
with the URG pointer set.
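
To see why splitting breaks things: the 16-bit urgent pointer is an offset relative to the segment's own sequence number, so every sub-segment produced by a split needs its own recomputed value. A hypothetical sketch (plain C, not kernel code; the struct and helper are invented for illustration) of the adjustment a splitter would have to make, and which most TSO/GSO engines skip by simply copying the original header into each sub-segment:

#include <stdint.h>

/* Hypothetical per-sub-segment header fields (illustration only). */
struct seg_hdr {
	uint32_t seq;      /* first sequence number of this sub-segment */
	uint16_t urg_ptr;  /* urgent offset, relative to seq */
	uint8_t  urg;      /* URG flag */
};

/* urg_seq is the absolute sequence number that the original header's
 * urgent pointer referred to (orig_seq + orig_urg_ptr). Recompute the
 * offset for a sub-segment starting at seg->seq, or drop URG when it
 * can no longer be expressed in the 16-bit field.
 */
static void fixup_urg(struct seg_hdr *seg, uint32_t urg_seq)
{
	uint32_t off = urg_seq - seg->seq;

	if (seg->urg && (int32_t)off >= 0 && off <= 0xffff) {
		seg->urg_ptr = (uint16_t)off;
	} else {
		seg->urg = 0;      /* urgent byte already passed, or too far ahead */
		seg->urg_ptr = 0;
	}
}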
Looking at tcp_current_mss (and the preceding comment) I even think
this was the original intention. However, this approach is insufficient,
because TSO/GSO is turned off only for newly created frames, not for
frames which were already pending at the arrival of a message with
MSG_OOB set. These frames were created when TSO/GSO was enabled,
so they may be large, and they will have the urgent pointer set
in tcp_transmit_skb().
With this patch, such large packets will be fragmented again before
going to the transmit routine.
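
The problem case is easy to produce from user space: queue a large write (which the stack may keep as TSO/GSO super-frames) and then send urgent data with MSG_OOB while those frames are still pending. A minimal sketch of such a sender, with placeholder address, port and buffer size and error handling omitted for brevity:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(9999),               /* placeholder port */
	};

	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);  /* placeholder address */
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));

	char big[256 * 1024];
	memset(big, 'A', sizeof(big));
	write(fd, big, sizeof(big));   /* large frames, eligible for TSO/GSO */
	send(fd, "!", 1, MSG_OOB);     /* urgent byte arrives while they are pending */

	close(fd);
	return 0;
}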
As a side note, at least the following NICs are known to screw up
the urgent pointer in the TCP header when doing TSO:
Intel 82566MM (PCI ID 8086:1049)
Intel 82566DC (PCI ID 8086:104b)
Intel 82541GI (PCI ID 8086:1076)
Broadcom NetXtreme II BCM5708 (PCI ID 14e4:164c)
Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--   net/ipv4/tcp_output.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ba85d8831893..85b07eba1879 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -722,7 +722,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
-	if (skb->len <= mss_now || !sk_can_gso(sk)) {
+	if (skb->len <= mss_now || !sk_can_gso(sk) ||
+	    tcp_urg_mode(tcp_sk(sk))) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
@@ -1163,7 +1164,9 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
 {
 	int tso_segs = tcp_skb_pcount(skb);
 
-	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
+	if (!tso_segs ||
+	    (tso_segs > 1 && (tcp_skb_mss(skb) != mss_now ||
+			      tcp_urg_mode(tcp_sk(sk))))) {
 		tcp_set_skb_tso_segs(sk, skb, mss_now);
 		tso_segs = tcp_skb_pcount(skb);
 	}
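
For reference (not part of the diff above): the tcp_urg_mode() helper consulted in both hunks was already present in net/ipv4/tcp_output.c at this point; to the best of my reading it is essentially the following predicate, shown here as an approximation rather than a quotation:

/* Approximate definition: urgent mode is considered active while data up
 * to snd_up has not yet been acknowledged.
 */
static inline int tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}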