author | Ilpo Järvinen <ilpo.jarvinen@helsinki.fi> | 2008-12-06 01:41:26 -0500
committer | David S. Miller <davem@davemloft.net> | 2008-12-06 01:41:26 -0500
commit | 775ffabf77a648d78fe1d20cb3a620e771abb921
tree | 92d953047db446134ddae8facf209fa71d14c992 /net/ipv4/tcp_input.c
parent | 9969ca5f205988fb96461075cb4914c55cf166b5
tcp: make mtu probe failure to not break gso'ed skbs unnecessarily
I noticed that since skb->len has nothing to do with the actual segment
length when GSO is in use, we need to figure it out separately; reuse
(and generalize) a helper from the recent skb-shifting work.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
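For context (not part of the patch): below is a minimal userspace sketch of the segment-length logic this change introduces. The struct, field names, and numbers are hypothetical stand-ins for the real sk_buff/skb_shared_info bookkeeping. The point is that for a GSO skb, skb->len is the sum of all segments, so the per-segment length has to come from gso_size (the MSS the skb will be cut into), while a single-segment skb can simply use skb->len.

/*
 * Illustrative sketch only, not kernel code. fake_skb and seglen() are
 * hypothetical stand-ins mirroring the logic of the new tcp_skb_seglen().
 */
#include <stdio.h>

struct fake_skb {
	unsigned int len;       /* total payload carried by this skb            */
	unsigned int gso_segs;  /* pcount: number of segments (1 for a plain skb) */
	unsigned int gso_size;  /* MSS the skb will be segmented into (0 if none) */
};

/* For a single-segment skb the segment length is simply skb->len;
 * for a GSO skb it is gso_size, never skb->len. */
static unsigned int seglen(const struct fake_skb *skb)
{
	return skb->gso_segs == 1 ? skb->len : skb->gso_size;
}

int main(void)
{
	struct fake_skb plain = { .len = 512,   .gso_segs = 1,  .gso_size = 0 };
	struct fake_skb gso   = { .len = 17520, .gso_segs = 12, .gso_size = 1460 };

	/* prints 512 for the plain skb and 1460 (not 17520) for the GSO skb */
	printf("plain: %u, gso: %u\n", seglen(&plain), seglen(&gso));
	return 0;
}

This mirrors the patched tcp_skb_seglen(): a pcount of 1 means skb->len is the segment length, otherwise tcp_skb_mss() (i.e. gso_size) is used.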
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 19
1 file changed, 7 insertions, 12 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 33902f6799c3..21c670190780 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1445,14 +1445,9 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
 /* I wish gso_size would have a bit more sane initialization than
  * something-or-zero which complicates things
  */
-static int tcp_shift_mss(struct sk_buff *skb)
+static int tcp_skb_seglen(struct sk_buff *skb)
 {
-	int mss = tcp_skb_mss(skb);
-
-	if (!mss)
-		mss = skb->len;
-
-	return mss;
+	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
 }
 
 /* Shifting pages past head area doesn't work */
@@ -1503,12 +1498,12 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	if (in_sack) {
 		len = skb->len;
 		pcount = tcp_skb_pcount(skb);
-		mss = tcp_shift_mss(skb);
+		mss = tcp_skb_seglen(skb);
 
 		/* TODO: Fix DSACKs to not fragment already SACKed and we can
 		 * drop this restriction as unnecessary
 		 */
-		if (mss != tcp_shift_mss(prev))
+		if (mss != tcp_skb_seglen(prev))
 			goto fallback;
 	} else {
 		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
@@ -1549,7 +1544,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 		/* TODO: Fix DSACKs to not fragment already SACKed and we can
 		 * drop this restriction as unnecessary
 		 */
-		if (mss != tcp_shift_mss(prev))
+		if (mss != tcp_skb_seglen(prev))
 			goto fallback;
 
 		if (len == mss) {
@@ -1578,7 +1573,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	if (!skb_can_shift(skb) ||
 	    (skb == tcp_send_head(sk)) ||
 	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
-	    (mss != tcp_shift_mss(skb)))
+	    (mss != tcp_skb_seglen(skb)))
 		goto out;
 
 	len = skb->len;
@@ -2853,7 +2848,7 @@ void tcp_simple_retransmit(struct sock *sk)
 	tcp_for_write_queue(skb, sk) {
 		if (skb == tcp_send_head(sk))
 			break;
-		if (skb->len > mss &&
+		if (tcp_skb_seglen(skb) > mss &&
 		    !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
 			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;