author    Michael Chan <mchan@broadcom.com>    2006-06-29 15:30:00 -0400
committer David S. Miller <davem@sunset.davemloft.net>    2006-06-29 19:58:08 -0400
commit    b0da8537037f337103348f239ad901477e907aa8 (patch)
tree      498a5dceb0d536fa54dcf4acc26ae1d1b43dfaf1 /net
parent    877ce7c1b3afd69a9b1caeb1b9964c992641f52a (diff)
[NET]: Add ECN support for TSO
In the current TSO implementation, NETIF_F_TSO and ECN cannot be turned on together in a TCP connection. The problem is that most hardware that supports TSO does not handle CWR correctly if it is set in the TSO packet. Correct handling requires CWR to be set in the first packet only, if it is set in the TSO header.

This patch adds the ability to turn on NETIF_F_TSO and ECN together, using GSO if necessary to handle TSO packets with CWR set. Hardware that handles CWR correctly can turn on NETIF_F_TSO_ECN in the dev->features flag.

All TSO packets with CWR set will have the SKB_GSO_TCPV4_ECN flag set. If the output device does not have the NETIF_F_TSO_ECN feature set, GSO will split the packet up correctly with CWR set only in the first segment.

With help from Herbert Xu <herbert@gondor.apana.org.au>.

Since ECN can always be enabled with TSO, the SOCK_NO_LARGESEND sock flag is completely removed.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
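The offload decision the message describes can be summarized in a short sketch. This is not code from the patch; the helper name can_offload_tso_ecn is hypothetical, and only the flag names SKB_GSO_TCPV4_ECN and NETIF_F_TSO_ECN come from the commit itself. The idea: a TSO packet whose GSO type carries SKB_GSO_TCPV4_ECN may be segmented by the hardware only if the device advertises NETIF_F_TSO_ECN; otherwise GSO must split it in software so CWR appears in the first segment only.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper, a minimal sketch of the policy in the commit
 * message -- not code from this patch. */
static int can_offload_tso_ecn(const struct net_device *dev,
                               struct sk_buff *skb)
{
        /* A TSO packet with CWR set is tagged SKB_GSO_TCPV4_ECN; only
         * hardware advertising NETIF_F_TSO_ECN may segment it itself. */
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4_ECN)
                return (dev->features & NETIF_F_TSO_ECN) != 0;

        /* Without CWR there is nothing special to handle. */
        return 1;
}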
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_input.c     | 4
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c    | 2
3 files changed, 0 insertions, 8 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 94fe5b1f9dc..7fa0b4a8a38 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4178,8 +4178,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 	 */
 
 	TCP_ECN_rcv_synack(tp, th);
-	if (tp->ecn_flags&TCP_ECN_OK)
-		sock_set_flag(sk, SOCK_NO_LARGESEND);
 
 	tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
 	tcp_ack(sk, skb, FLAG_SLOWPATH);
@@ -4322,8 +4320,6 @@ discard:
 	tp->max_window = tp->snd_wnd;
 
 	TCP_ECN_rcv_syn(tp, th);
-	if (tp->ecn_flags&TCP_ECN_OK)
-		sock_set_flag(sk, SOCK_NO_LARGESEND);
 
 	tcp_mtup_init(sk);
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 2b9b7f6c7f7..54b2ef7d3ef 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -440,8 +440,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
 	newtp->rx_opt.mss_clamp = req->mss;
 	TCP_ECN_openreq_child(newtp, req);
-	if (newtp->ecn_flags&TCP_ECN_OK)
-		sock_set_flag(newsk, SOCK_NO_LARGESEND);
 
 	TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bdd71db8bf9..5a7cb4a9c86 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2044,8 +2044,6 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	memset(th, 0, sizeof(struct tcphdr));
 	th->syn = 1;
 	th->ack = 1;
-	if (dst->dev->features&NETIF_F_TSO)
-		ireq->ecn_ok = 0;
 	TCP_ECN_make_synack(req, th);
 	th->source = inet_sk(sk)->sport;
 	th->dest = ireq->rmt_port;