diff options
| author | Olof Johansson <olof@lixom.net> | 2012-09-05 18:33:19 -0400 |
|---|---|---|
| committer | Olof Johansson <olof@lixom.net> | 2012-09-05 18:33:19 -0400 |
| commit | 301fd5c13257862df66c81c48d963d474e63e0ef (patch) | |
| tree | 128af89336dd8b1ee62696c4588057b2eb0b445e /net/ipv4/tcp_output.c | |
| parent | daa56a06b5a592ceb69130a1d15c067a078f2701 (diff) | |
| parent | 7952717adb69efc1d2443a1858f96d23c2fb93e0 (diff) | |
Merge branch 'kzm9g' of git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas into next/soc
* 'kzm9g' of git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas:
ARM: shmobile: kzm9g: enable restarting
+ sync to 3.6-rc3
Diffstat (limited to 'net/ipv4/tcp_output.c')
| -rw-r--r-- | net/ipv4/tcp_output.c | 37 |
1 file changed, 22 insertions, 15 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3f1bcff0b10b..d04632673a9e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -910,14 +910,18 @@ void tcp_release_cb(struct sock *sk) | |||
| 910 | if (flags & (1UL << TCP_TSQ_DEFERRED)) | 910 | if (flags & (1UL << TCP_TSQ_DEFERRED)) |
| 911 | tcp_tsq_handler(sk); | 911 | tcp_tsq_handler(sk); |
| 912 | 912 | ||
| 913 | if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) | 913 | if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { |
| 914 | tcp_write_timer_handler(sk); | 914 | tcp_write_timer_handler(sk); |
| 915 | 915 | __sock_put(sk); | |
| 916 | if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) | 916 | } |
| 917 | if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) { | ||
| 917 | tcp_delack_timer_handler(sk); | 918 | tcp_delack_timer_handler(sk); |
| 918 | 919 | __sock_put(sk); | |
| 919 | if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) | 920 | } |
| 921 | if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { | ||
| 920 | sk->sk_prot->mtu_reduced(sk); | 922 | sk->sk_prot->mtu_reduced(sk); |
| 923 | __sock_put(sk); | ||
| 924 | } | ||
| 921 | } | 925 | } |
| 922 | EXPORT_SYMBOL(tcp_release_cb); | 926 | EXPORT_SYMBOL(tcp_release_cb); |
| 923 | 927 | ||
| @@ -940,7 +944,7 @@ void __init tcp_tasklet_init(void) | |||
| 940 | * We cant xmit new skbs from this context, as we might already | 944 | * We cant xmit new skbs from this context, as we might already |
| 941 | * hold qdisc lock. | 945 | * hold qdisc lock. |
| 942 | */ | 946 | */ |
| 943 | void tcp_wfree(struct sk_buff *skb) | 947 | static void tcp_wfree(struct sk_buff *skb) |
| 944 | { | 948 | { |
| 945 | struct sock *sk = skb->sk; | 949 | struct sock *sk = skb->sk; |
| 946 | struct tcp_sock *tp = tcp_sk(sk); | 950 | struct tcp_sock *tp = tcp_sk(sk); |
| @@ -1522,21 +1526,21 @@ static void tcp_cwnd_validate(struct sock *sk) | |||
| 1522 | * when we would be allowed to send the split-due-to-Nagle skb fully. | 1526 | * when we would be allowed to send the split-due-to-Nagle skb fully. |
| 1523 | */ | 1527 | */ |
| 1524 | static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, | 1528 | static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, |
| 1525 | unsigned int mss_now, unsigned int cwnd) | 1529 | unsigned int mss_now, unsigned int max_segs) |
| 1526 | { | 1530 | { |
| 1527 | const struct tcp_sock *tp = tcp_sk(sk); | 1531 | const struct tcp_sock *tp = tcp_sk(sk); |
| 1528 | u32 needed, window, cwnd_len; | 1532 | u32 needed, window, max_len; |
| 1529 | 1533 | ||
| 1530 | window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; | 1534 | window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; |
| 1531 | cwnd_len = mss_now * cwnd; | 1535 | max_len = mss_now * max_segs; |
| 1532 | 1536 | ||
| 1533 | if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) | 1537 | if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) |
| 1534 | return cwnd_len; | 1538 | return max_len; |
| 1535 | 1539 | ||
| 1536 | needed = min(skb->len, window); | 1540 | needed = min(skb->len, window); |
| 1537 | 1541 | ||
| 1538 | if (cwnd_len <= needed) | 1542 | if (max_len <= needed) |
| 1539 | return cwnd_len; | 1543 | return max_len; |
| 1540 | 1544 | ||
| 1541 | return needed - needed % mss_now; | 1545 | return needed - needed % mss_now; |
| 1542 | } | 1546 | } |
| @@ -1765,7 +1769,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) | |||
| 1765 | limit = min(send_win, cong_win); | 1769 | limit = min(send_win, cong_win); |
| 1766 | 1770 | ||
| 1767 | /* If a full-sized TSO skb can be sent, do it. */ | 1771 | /* If a full-sized TSO skb can be sent, do it. */ |
| 1768 | if (limit >= sk->sk_gso_max_size) | 1772 | if (limit >= min_t(unsigned int, sk->sk_gso_max_size, |
| 1773 | sk->sk_gso_max_segs * tp->mss_cache)) | ||
| 1769 | goto send_now; | 1774 | goto send_now; |
| 1770 | 1775 | ||
| 1771 | /* Middle in queue won't get any more data, full sendable already? */ | 1776 | /* Middle in queue won't get any more data, full sendable already? */ |
| @@ -1999,7 +2004,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
| 1999 | limit = mss_now; | 2004 | limit = mss_now; |
| 2000 | if (tso_segs > 1 && !tcp_urg_mode(tp)) | 2005 | if (tso_segs > 1 && !tcp_urg_mode(tp)) |
| 2001 | limit = tcp_mss_split_point(sk, skb, mss_now, | 2006 | limit = tcp_mss_split_point(sk, skb, mss_now, |
| 2002 | cwnd_quota); | 2007 | min_t(unsigned int, |
| 2008 | cwnd_quota, | ||
| 2009 | sk->sk_gso_max_segs)); | ||
| 2003 | 2010 | ||
| 2004 | if (skb->len > limit && | 2011 | if (skb->len > limit && |
| 2005 | unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) | 2012 | unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) |
