Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	33
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3be16727f058..17a11e65e57f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -767,6 +767,17 @@ void tcp_release_cb(struct sock *sk)
 	if (flags & (1UL << TCP_TSQ_DEFERRED))
 		tcp_tsq_handler(sk);
 
+	/* Here begins the tricky part :
+	 * We are called from release_sock() with :
+	 * 1) BH disabled
+	 * 2) sk_lock.slock spinlock held
+	 * 3) socket owned by us (sk->sk_lock.owned == 1)
+	 *
+	 * But following code is meant to be called from BH handlers,
+	 * so we should keep BH disabled, but early release socket ownership
+	 */
+	sock_release_ownership(sk);
+
 	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
 		tcp_write_timer_handler(sk);
 		__sock_put(sk);
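
Note: the comment block added above documents the calling context; the other half of the pattern lives in tcp_tasklet_func() earlier in this file. A condensed sketch of that producer side follows (not a verbatim quote; it assumes the 3.x-era tsq_flags field on struct tcp_sock): a BH handler that finds the socket owned by a process sets a deferral bit instead of touching TCP state, and tcp_release_cb() replays the work at release_sock() time. Releasing socket ownership before running the deferred handlers is what breaks the deadlock when those handlers wake another thread spinning on sk_lock.

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_tsq_handler(sk);	/* safe: socket not owned, BH context */
	} else {
		/* defer the work to tcp_release_cb() */
		set_bit(TCP_TSQ_DEFERRED, &tcp_sk(sk)->tsq_flags);
	}
	bh_unlock_sock(sk);
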
@@ -864,8 +875,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
 	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
 		     fclone->fclone == SKB_FCLONE_CLONE))
-		NET_INC_STATS_BH(sock_net(sk),
-				 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+		NET_INC_STATS(sock_net(sk),
+			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 
 	if (unlikely(skb_cloned(skb)))
 		skb = pskb_copy(skb, gfp_mask);
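
Note: dropping the _BH suffix is the context-safety companion of the hunk above. The _BH SNMP macros use the lighter per-cpu increment and are only correct when softirqs are already known to be disabled, while the plain forms are safe from any context; tcp_transmit_skb() is reached from process context (the sendmsg() write path) as well as from softirq handlers. A simplified view of the two flavors, as defined in include/net/ip.h of this era (expansion details are config dependent):

	#define NET_INC_STATS(net, field)	\
		SNMP_INC_STATS((net)->mib.net_statistics, field)	/* any context */
	#define NET_INC_STATS_BH(net, field)	\
		SNMP_INC_STATS_BH((net)->mib.net_statistics, field)	/* BH context only */
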
@@ -2337,6 +2348,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int cur_mss;
+	int err;
 
 	/* Inconslusive MTU probe */
 	if (icsk->icsk_mtup.probe_size) {
@@ -2400,11 +2412,15 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		     skb_headroom(skb) >= 0xFFFF)) {
 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
 						   GFP_ATOMIC);
-		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-			      -ENOBUFS;
+		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+			     -ENOBUFS;
 	} else {
-		return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 	}
+
+	if (likely(!err))
+		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+	return err;
 }
 
 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
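
Note: capturing err instead of returning from each branch lets TCPCB_EVER_RETRANS be set in exactly one place, and only when the segment was really handed down the stack; a locally failed attempt (-ENOBUFS) no longer taints the skb. That matters because Karn's rule discards RTT samples from ever-retransmitted segments and the undo heuristics distrust them. It also means direct callers of __tcp_retransmit_skb(), such as the tail loss probe path, mark segments consistently with the tcp_retransmit_skb() wrapper, which under this scheme reduces to roughly the following (condensed sketch, not verbatim):

	err = __tcp_retransmit_skb(sk, skb);	/* sets TCPCB_EVER_RETRANS iff err == 0 */
	if (err == 0) {
		/* only genuine retransmissions feed the SNMP counters */
		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
		tcp_sk(sk)->total_retrans++;
	}
	return err;
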
@@ -2908,7 +2924,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
 		MAX_TCP_OPTION_SPACE;
 
-	syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
+	space = min_t(size_t, space, fo->size);
+
+	/* limit to order-0 allocations */
+	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
+
+	syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
 				   sk->sk_allocation);
 	if (syn_data == NULL)
 		goto fallback;
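
Note: the two min_t() clamps bound the tailroom requested from skb_copy_expand(): first to the Fast Open payload actually queued (fo->size), since asking for more room than there is data to copy only inflates the allocation, then to the largest linear buffer a single order-0 page can carry, because higher-order allocations fail easily under memory fragmentation. The headroom argument likewise becomes an explicit MAX_TCP_HEADER instead of whatever headroom the original SYN happened to have. How the order-0 bound falls out of the skbuff macros (simplified from include/linux/skbuff.h; exact figures are config dependent):

	#define SKB_WITH_OVERHEAD(X)	\
		((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
	#define SKB_MAX_ORDER(X, ORDER)	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
	#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))

	/* with 4 KiB pages: 4096 - MAX_TCP_HEADER - the aligned skb_shared_info,
	 * i.e. roughly 3.5 KB of payload, always satisfiable by a single page */
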