author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2007-12-25 00:33:45 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:00:06 -0500
commit	0e3a4803aa06cd7bc2cfc1d04289df4f6027640a (patch)
tree	c3af99ceea81cd14e14c96fe0c85f39236de933b /net/ipv4/tcp_output.c
parent	7ffc49a6ee92b7138c2ee28073a8e10e58335d62 (diff)
[TCP]: Force TSO splits to MSS boundaries
If snd_wnd - snd_nxt wasn't a multiple of MSS, the skb was split on an
odd boundary by the callers of tcp_window_allows. We try really hard
to avoid unnecessary modulos. Therefore the old caller side check
"if (skb->len < limit)" was too wide as well, because limit is not
bound in any way to skb->len and could cause spurious testing for
trimming in the middle of the queue, while we only wanted that to
happen at the tail of the queue.

A simple additional caller side check for tcp_write_queue_tail would
likely have resulted in 2 x modulos, because the limit would first
have to be calculated from the window; however, doing that unnecessary
modulo is not mandatory. After a minor change to the algorithm, we
simply determine first whether the modulo is needed at all, and at
that point immediately decide from which value it should be
calculated.

This approach also kills some duplicated code.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
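For illustration, the heart of the new split-point rule can be sketched as a small standalone C program (hypothetical names such as split_point and is_tail; plain userspace code, not the kernel function itself): the cwnd-sized chunk is taken as-is whenever it already ends on an MSS boundary, and only otherwise is a single modulo computed, directly from whichever value (skb length or window) actually bounds the send.

#include <stdio.h>

/* Sketch of the split-point idea: skb_len is the data queued in the
 * skb, window plays the role of snd_una + snd_wnd - skb seq, cwnd_len
 * is mss_now * cwnd_quota, and is_tail says whether the skb is the
 * last one in the write queue. */
static unsigned int split_point(unsigned int skb_len, unsigned int window,
				unsigned int cwnd_len, unsigned int mss_now,
				int is_tail)
{
	unsigned int needed;

	/* Common case: cwnd is the limit and more data follows, so the
	 * boundary is already a multiple of mss_now; no modulo needed. */
	if (cwnd_len <= window && !is_tail)
		return cwnd_len;

	/* Tail skb holding more than cwnd allows: still an mss_now
	 * multiple, no modulo needed. */
	if (is_tail && cwnd_len <= skb_len)
		return cwnd_len;

	/* Otherwise take the modulo exactly once, from the value that
	 * actually binds (the skb itself or the window). */
	needed = skb_len < window ? skb_len : window;
	return needed - needed % mss_now;
}

int main(void)
{
	/* Tail skb of 5*MSS, window of 4*MSS + 100: prints 5792, i.e.
	 * 4 full segments, never an odd 5892-byte split. */
	printf("%u\n", split_point(5 * 1448, 4 * 1448 + 100, 10 * 1448, 1448, 1));
	/* Mid-queue skb limited by cwnd: prints cwnd_len (7240) with no
	 * modulo computed at all. */
	printf("%u\n", split_point(10 * 1448, 64000, 5 * 1448, 1448, 0));
	return 0;
}

The single modulo in the final return, taken from the binding value, is what the patch below implements in tcp_mss_split_point() while removing the duplicated trim logic from tcp_write_xmit() and tcp_push_one().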
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	51
1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9a9510acb14..9058e0a2510 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1017,13 +1017,29 @@ static void tcp_cwnd_validate(struct sock *sk)
 	}
 }
 
-static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
+/* Returns the portion of skb which can be sent right away without
+ * introducing MSS oddities to segment boundaries. In rare cases where
+ * mss_now != mss_cache, we will request caller to create a small skb
+ * per input skb which could be mostly avoided here (if desired).
+ */
+static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
+					unsigned int mss_now,
+					unsigned int cwnd)
 {
-	u32 window, cwnd_len;
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 needed, window, cwnd_len;
 
 	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
 	cwnd_len = mss_now * cwnd;
-	return min(window, cwnd_len);
+
+	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
+		return cwnd_len;
+
+	if (skb == tcp_write_queue_tail(sk) && cwnd_len <= skb->len)
+		return cwnd_len;
+
+	needed = min(skb->len, window);
+	return needed - needed % mss_now;
 }
 
 /* Can at least one segment of SKB be sent right now, according to the
@@ -1458,17 +1474,9 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 		}
 
 		limit = mss_now;
-		if (tso_segs > 1) {
-			limit = tcp_window_allows(tp, skb,
-						  mss_now, cwnd_quota);
-
-			if (skb->len < limit) {
-				unsigned int trim = skb->len % mss_now;
-
-				if (trim)
-					limit = skb->len - trim;
-			}
-		}
+		if (tso_segs > 1)
+			limit = tcp_mss_split_point(sk, skb, mss_now,
+						    cwnd_quota);
 
 		if (skb->len > limit &&
 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
@@ -1515,7 +1523,6 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
  */
 void tcp_push_one(struct sock *sk, unsigned int mss_now)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
 	unsigned int tso_segs, cwnd_quota;
 
@@ -1530,17 +1537,9 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 	BUG_ON(!tso_segs);
 
 	limit = mss_now;
-	if (tso_segs > 1) {
-		limit = tcp_window_allows(tp, skb,
-					  mss_now, cwnd_quota);
-
-		if (skb->len < limit) {
-			unsigned int trim = skb->len % mss_now;
-
-			if (trim)
-				limit = skb->len - trim;
-		}
-	}
+	if (tso_segs > 1)
+		limit = tcp_mss_split_point(sk, skb, mss_now,
+					    cwnd_quota);
 
 	if (skb->len > limit &&
 	    unlikely(tso_fragment(sk, skb, limit, mss_now)))