author    Bjorn Helgaas <bhelgaas@google.com>  2012-09-13 10:41:01 -0400
committer Bjorn Helgaas <bhelgaas@google.com>  2012-09-13 10:41:01 -0400
commit    78890b5989d96ddce989cde929c45ceeded0fcaf (patch)
tree      4e2da81fc7c97f11aee174b1eedac110c9a68b3a /net/ipv4/tcp_output.c
parent    1959ec5f82acbdf91425b41600f119ebecb5f6a8 (diff)
parent    55d512e245bc7699a8800e23df1a24195dd08217 (diff)
Merge commit 'v3.6-rc5' into next
* commit 'v3.6-rc5': (1098 commits)
  Linux 3.6-rc5
  HID: tpkbd: work even if the new Lenovo Keyboard driver is not configured
  Remove user-triggerable BUG from mpol_to_str
  xen/pciback: Fix proper FLR steps.
  uml: fix compile error in deliver_alarm()
  dj: memory scribble in logi_dj
  Fix order of arguments to compat_put_time[spec|val]
  xen: Use correct masking in xen_swiotlb_alloc_coherent.
  xen: fix logical error in tlb flushing
  xen/p2m: Fix one-off error in checking the P2M tree directory.
  powerpc: Don't use __put_user() in patch_instruction
  powerpc: Make sure IPI handlers see data written by IPI senders
  powerpc: Restore correct DSCR in context switch
  powerpc: Fix DSCR inheritance in copy_thread()
  powerpc: Keep thread.dscr and thread.dscr_inherit in sync
  powerpc: Update DSCR on all CPUs when writing sysfs dscr_default
  powerpc/powernv: Always go into nap mode when CPU is offline
  powerpc: Give hypervisor decrementer interrupts their own handler
  powerpc/vphn: Fix arch_update_cpu_topology() return value
  ARM: gemini: fix the gemini build
  ...

Conflicts:
	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
	drivers/rapidio/devices/tsi721.c
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f1bcff0b10b..d04632673a9e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -910,14 +910,18 @@ void tcp_release_cb(struct sock *sk)
 	if (flags & (1UL << TCP_TSQ_DEFERRED))
 		tcp_tsq_handler(sk);
 
-	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
+	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
 		tcp_write_timer_handler(sk);
-
-	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
+		__sock_put(sk);
+	}
+	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
 		tcp_delack_timer_handler(sk);
-
-	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED))
+		__sock_put(sk);
+	}
+	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
 		sk->sk_prot->mtu_reduced(sk);
+		__sock_put(sk);
+	}
 }
 EXPORT_SYMBOL(tcp_release_cb);
 
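
This hunk pairs each deferred handler with a __sock_put(): work flagged from timer/atomic context (where the socket lock is held) keeps a socket reference until tcp_release_cb() runs it, and that reference must be dropped once the work is done. A minimal userspace sketch of the same flag-defer-release pattern; my_sock, MY_DEFERRED, timer_fires, and release_cb are hypothetical illustrations, not kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical deferred-work flag, mirroring TCP_WRITE_TIMER_DEFERRED. */
#define MY_DEFERRED (1UL << 0)

struct my_sock {
	atomic_ulong flags;   /* deferred-work bits, set from "timer" context */
	atomic_int   refcnt;  /* reference held while work is pending */
};

/* Timer context: can't run the handler now (lock held), so flag the
 * work and keep a reference so the object stays alive until release. */
static void timer_fires(struct my_sock *s)
{
	atomic_fetch_or(&s->flags, MY_DEFERRED);
	atomic_fetch_add(&s->refcnt, 1);
}

/* Release path, analogous to tcp_release_cb(): consume the flags, run
 * the deferred work, and drop the reference taken above -- the drop
 * the pre-patch code was missing. */
static void release_cb(struct my_sock *s)
{
	unsigned long flags = atomic_exchange(&s->flags, 0);

	if (flags & MY_DEFERRED) {
		printf("running deferred work\n");
		atomic_fetch_sub(&s->refcnt, 1); /* pairs with timer_fires() */
	}
}

int main(void)
{
	struct my_sock s = { .flags = 0, .refcnt = 1 };

	timer_fires(&s);
	release_cb(&s);
	printf("refcnt back to %d\n", atomic_load(&s.refcnt));
	return 0;
}
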
@@ -940,7 +944,7 @@ void __init tcp_tasklet_init(void)
  * We cant xmit new skbs from this context, as we might already
  * hold qdisc lock.
  */
-void tcp_wfree(struct sk_buff *skb)
+static void tcp_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1522,21 +1526,21 @@ static void tcp_cwnd_validate(struct sock *sk)
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-					unsigned int mss_now, unsigned int cwnd)
+					unsigned int mss_now, unsigned int max_segs)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 needed, window, cwnd_len;
+	u32 needed, window, max_len;
 
 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-	cwnd_len = mss_now * cwnd;
+	max_len = mss_now * max_segs;
 
-	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
-		return cwnd_len;
+	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+		return max_len;
 
 	needed = min(skb->len, window);
 
-	if (cwnd_len <= needed)
-		return cwnd_len;
+	if (max_len <= needed)
+		return max_len;
 
 	return needed - needed % mss_now;
 }
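
The rename reflects that the cap is now a generic segment limit, not just the congestion window: the helper sends at most max_segs full segments or whatever fits in the receive window, rounded down to whole segments when the window is the limiter. A userspace copy of the arithmetic with made-up numbers (1448-byte MSS, 10-segment cap, 9000-byte window), assuming the skb is not the tail of the write queue:

#include <stdio.h>

/* Userspace re-implementation of the tcp_mss_split_point() arithmetic;
 * skb_len and window are plain numbers here, not kernel structures. */
static unsigned int mss_split_point(unsigned int skb_len, unsigned int window,
				    unsigned int mss_now, unsigned int max_segs,
				    int is_tail)
{
	unsigned int needed, max_len = mss_now * max_segs;

	if (max_len <= window && !is_tail)
		return max_len;

	needed = skb_len < window ? skb_len : window;
	if (max_len <= needed)
		return max_len;

	/* Window is the limiter: send only whole segments. */
	return needed - needed % mss_now;
}

int main(void)
{
	/* 64KB skb, 9000-byte window, 1448-byte MSS, 10-segment cap. */
	unsigned int limit = mss_split_point(65536, 9000, 1448, 10, 0);

	/* max_len = 14480 > window, needed = 9000, so the result is
	 * 9000 - 9000 % 1448 = 8688 bytes (6 full segments). */
	printf("split point: %u bytes\n", limit);
	return 0;
}
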
@@ -1765,7 +1769,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	limit = min(send_win, cong_win);
 
 	/* If a full-sized TSO skb can be sent, do it. */
-	if (limit >= sk->sk_gso_max_size)
+	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+			   sk->sk_gso_max_segs * tp->mss_cache))
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */
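
Before this change the "full-sized TSO skb" test compared only against sk_gso_max_size, a byte cap; it now also honors the device's segment cap by taking the smaller of the byte cap and sk_gso_max_segs * mss_cache. A sketch of the clamp with illustrative values (the 65536 / 64 / 1448 numbers are examples, not guaranteed defaults; min_u stands in for the kernel's min_t):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Example caps: 64KB byte limit, 64-segment device limit, 1448 MSS. */
	unsigned int gso_max_size = 65536;
	unsigned int gso_max_segs = 64;
	unsigned int mss_cache = 1448;

	/* Effective full-TSO threshold, as in the patched test:
	 * min(byte cap, segment cap expressed in bytes). */
	unsigned int full_tso = min_u(gso_max_size, gso_max_segs * mss_cache);

	printf("full-sized TSO threshold: %u bytes\n", full_tso); /* 65536 */

	/* A stricter device (say, 8 segments) lowers the threshold. */
	printf("with 8 segments: %u bytes\n", min_u(gso_max_size, 8 * mss_cache));
	return 0;
}
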
@@ -1999,7 +2004,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		limit = mss_now;
 		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
-						    cwnd_quota);
+						    min_t(unsigned int,
+							  cwnd_quota,
+							  sk->sk_gso_max_segs));
 
 		if (skb->len > limit &&
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
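
The same segment cap is applied on the transmit path: cwnd_quota is a budget in segments, so clamping it with sk_gso_max_segs before calling tcp_mss_split_point() keeps any single skb within what the NIC can segment. A toy illustration of the caller-side clamp (all values invented):

#include <stdio.h>

int main(void)
{
	/* Invented example: congestion window allows 45 more segments,
	 * but the device can only segment 16 per skb. */
	unsigned int cwnd_quota = 45;
	unsigned int gso_max_segs = 16;
	unsigned int mss_now = 1448;

	unsigned int segs = cwnd_quota < gso_max_segs ? cwnd_quota
						      : gso_max_segs;

	/* This is the max_segs handed to tcp_mss_split_point(), i.e.
	 * at most 16 * 1448 = 23168 bytes in this one skb. */
	printf("segments per skb: %u (%u bytes)\n", segs, segs * mss_now);
	return 0;
}
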