Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 64 +++++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8c8d7e06b72f..a369e8a70b2c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2812,39 +2812,65 @@ begin_fwd:
 	}
 }
 
-/* Send a fin. The caller locks the socket for us. This cannot be
- * allowed to fail queueing a FIN frame under any circumstances.
+/* We allow to exceed memory limits for FIN packets to expedite
+ * connection tear down and (memory) recovery.
+ * Otherwise tcp_send_fin() could be tempted to either delay FIN
+ * or even be forced to close flow without any FIN.
+ */
+static void sk_forced_wmem_schedule(struct sock *sk, int size)
+{
+	int amt, status;
+
+	if (size <= sk->sk_forward_alloc)
+		return;
+	amt = sk_mem_pages(size);
+	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+	sk_memory_allocated_add(sk, amt, &status);
+}
+
+/* Send a FIN. The caller locks the socket for us.
+ * We should try to send a FIN packet really hard, but eventually give up.
  */
 void tcp_send_fin(struct sock *sk)
 {
+	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb = tcp_write_queue_tail(sk);
-	int mss_now;
 
-	/* Optimization, tack on the FIN if we have a queue of
-	 * unsent frames. But be careful about outgoing SACKS
-	 * and IP options.
+	/* Optimization, tack on the FIN if we have one skb in write queue and
+	 * this skb was not yet sent, or we are under memory pressure.
+	 * Note: in the latter case, FIN packet will be sent after a timeout,
+	 * as TCP stack thinks it has already been transmitted.
 	 */
-	mss_now = tcp_current_mss(sk);
-
-	if (tcp_send_head(sk)) {
-		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
-		TCP_SKB_CB(skb)->end_seq++;
+	if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+coalesce:
+		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
+		TCP_SKB_CB(tskb)->end_seq++;
 		tp->write_seq++;
+		if (!tcp_send_head(sk)) {
+			/* This means tskb was already sent.
+			 * Pretend we included the FIN on previous transmit.
+			 * We need to set tp->snd_nxt to the value it would have
+			 * if FIN had been sent. This is because retransmit path
+			 * does not change tp->snd_nxt.
+			 */
+			tp->snd_nxt++;
+			return;
+		}
 	} else {
-		/* Socket is locked, keep trying until memory is available. */
-		for (;;) {
-			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
-			if (skb)
-				break;
-			yield();
+		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
+		if (unlikely(!skb)) {
+			if (tskb)
+				goto coalesce;
+			return;
 		}
+		skb_reserve(skb, MAX_TCP_HEADER);
+		sk_forced_wmem_schedule(sk, skb->truesize);
 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 		tcp_init_nondata_skb(skb, tp->write_seq,
 				     TCPHDR_ACK | TCPHDR_FIN);
 		tcp_queue_skb(sk, skb);
 	}
-	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
+	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
 }
 
 /* We get here when a process closes a file descriptor (either due to
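
Note: the hunk above does two things at once. tcp_send_fin() no longer spins in an unbounded alloc/yield() loop when memory is tight; instead it either piggybacks the FIN on the tail skb (even one already sent, bumping tp->snd_nxt so the retransmit path delivers it) or queues a fresh skb whose memory is charged by sk_forced_wmem_schedule() regardless of protocol limits. The user-space sketch below isolates that forced-accounting idea; struct fake_sock, PAGE_QUANTUM, wmem_schedule() and forced_wmem_schedule() are stand-ins invented for this illustration, not kernel API.

/* Minimal user-space sketch of the forced write-memory accounting
 * pattern used by sk_forced_wmem_schedule() above. All names and
 * types here are stand-ins for illustration, not kernel API.
 */
#include <stdio.h>

#define PAGE_QUANTUM 4096		/* stand-in for SK_MEM_QUANTUM */

struct fake_sock {
	int forward_alloc;		/* bytes already reserved for this socket */
	long protocol_allocated;	/* stand-in for the global protocol counter */
	long protocol_limit;		/* stand-in for the tcp_mem hard limit */
};

/* Normal path: refuse the reservation once the protocol limit is hit.
 * The pre-patch tcp_send_fin() effectively looped until this succeeded.
 */
static int wmem_schedule(struct fake_sock *sk, int size)
{
	int pages = (size + PAGE_QUANTUM - 1) / PAGE_QUANTUM;

	if (size <= sk->forward_alloc)
		return 1;
	if (sk->protocol_allocated + pages > sk->protocol_limit)
		return 0;
	sk->forward_alloc += pages * PAGE_QUANTUM;
	sk->protocol_allocated += pages;
	return 1;
}

/* Forced path: charge the pages unconditionally, mirroring the new
 * sk_forced_wmem_schedule(). Overshooting the limit is acceptable for
 * a FIN because sending it lets the flow end and its memory be freed.
 */
static void forced_wmem_schedule(struct fake_sock *sk, int size)
{
	int pages = (size + PAGE_QUANTUM - 1) / PAGE_QUANTUM;

	if (size <= sk->forward_alloc)
		return;
	sk->forward_alloc += pages * PAGE_QUANTUM;
	sk->protocol_allocated += pages;	/* no limit check on purpose */
}

int main(void)
{
	struct fake_sock sk = { 0, 10, 10 };	/* protocol already at its limit */

	printf("normal schedule: %s\n",
	       wmem_schedule(&sk, 2048) ? "granted" : "denied");
	forced_wmem_schedule(&sk, 2048);
	printf("forced schedule: forward_alloc=%d, limit exceeded by %ld page(s)\n",
	       sk.forward_alloc, sk.protocol_allocated - sk.protocol_limit);
	return 0;
}

Run under these assumptions, the normal reservation is denied while the forced one goes through and overshoots the limit by one page, which is exactly why the patched tcp_send_fin() can always queue its FIN instead of looping.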