Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 398
1 file changed, 160 insertions(+), 238 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b224eb8bce8b..2fd2bc9e3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -88,12 +88,14 @@ int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
+/* rfc5961 challenge ack rate limiting */
+int sysctl_tcp_challenge_ack_limit = 100;
+
 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly = 2;
 int sysctl_tcp_frto_response __read_mostly;
-int sysctl_tcp_nometrics_save __read_mostly;
 
 int sysctl_tcp_thin_dupack __read_mostly;
 
@@ -701,7 +703,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 /* Calculate rto without backoff. This is the second half of Van Jacobson's
  * routine referred to above.
  */
-static inline void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Old crap is replaced with new one. 8)
@@ -728,109 +730,6 @@ static inline void tcp_set_rto(struct sock *sk)
 	tcp_bound_rto(sk);
 }
 
-/* Save metrics learned by this TCP session.
-   This function is called only, when TCP finishes successfully
-   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
- */
-void tcp_update_metrics(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (sysctl_tcp_nometrics_save)
-		return;
-
-	dst_confirm(dst);
-
-	if (dst && (dst->flags & DST_HOST)) {
-		const struct inet_connection_sock *icsk = inet_csk(sk);
-		int m;
-		unsigned long rtt;
-
-		if (icsk->icsk_backoff || !tp->srtt) {
-			/* This session failed to estimate rtt. Why?
-			 * Probably, no packets returned in time.
-			 * Reset our results.
-			 */
-			if (!(dst_metric_locked(dst, RTAX_RTT)))
-				dst_metric_set(dst, RTAX_RTT, 0);
-			return;
-		}
-
-		rtt = dst_metric_rtt(dst, RTAX_RTT);
-		m = rtt - tp->srtt;
-
-		/* If newly calculated rtt larger than stored one,
-		 * store new one. Otherwise, use EWMA. Remember,
-		 * rtt overestimation is always better than underestimation.
-		 */
-		if (!(dst_metric_locked(dst, RTAX_RTT))) {
-			if (m <= 0)
-				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
-			else
-				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
-		}
-
-		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
-			unsigned long var;
-			if (m < 0)
-				m = -m;
-
-			/* Scale deviation to rttvar fixed point */
-			m >>= 1;
-			if (m < tp->mdev)
-				m = tp->mdev;
-
-			var = dst_metric_rtt(dst, RTAX_RTTVAR);
-			if (m >= var)
-				var = m;
-			else
-				var -= (var - m) >> 2;
-
-			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
-		}
-
-		if (tcp_in_initial_slowstart(tp)) {
-			/* Slow start still did not finish. */
-			if (dst_metric(dst, RTAX_SSTHRESH) &&
-			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
-			if (!dst_metric_locked(dst, RTAX_CWND) &&
-			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
-		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
-			   icsk->icsk_ca_state == TCP_CA_Open) {
-			/* Cong. avoidance phase, cwnd is reliable. */
-			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH,
-					       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
-			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND,
-					       (dst_metric(dst, RTAX_CWND) +
-						tp->snd_cwnd) >> 1);
-		} else {
-			/* Else slow start did not finish, cwnd is non-sense,
-			   ssthresh may be also invalid.
-			 */
-			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND,
-					       (dst_metric(dst, RTAX_CWND) +
-						tp->snd_ssthresh) >> 1);
-			if (dst_metric(dst, RTAX_SSTHRESH) &&
-			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
-		}
-
-		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
-			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
-			    tp->reordering != sysctl_tcp_reordering)
-				dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
-		}
-	}
-}
-
 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
 {
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -867,7 +766,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
  */
-static void tcp_disable_fack(struct tcp_sock *tp)
+void tcp_disable_fack(struct tcp_sock *tp)
 {
 	/* RFC3517 uses different metric in lost marker => reset on change */
 	if (tcp_is_fack(tp))
@@ -881,86 +780,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
 	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
 }
 
-/* Initialize metrics on socket. */
-
-static void tcp_init_metrics(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (dst == NULL)
-		goto reset;
-
-	dst_confirm(dst);
-
-	if (dst_metric_locked(dst, RTAX_CWND))
-		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
-	if (dst_metric(dst, RTAX_SSTHRESH)) {
-		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
-		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
-			tp->snd_ssthresh = tp->snd_cwnd_clamp;
-	} else {
-		/* ssthresh may have been reduced unnecessarily during.
-		 * 3WHS. Restore it back to its initial default.
-		 */
-		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	}
-	if (dst_metric(dst, RTAX_REORDERING) &&
-	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
-		tcp_disable_fack(tp);
-		tcp_disable_early_retrans(tp);
-		tp->reordering = dst_metric(dst, RTAX_REORDERING);
-	}
-
-	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
-		goto reset;
-
-	/* Initial rtt is determined from SYN,SYN-ACK.
-	 * The segment is small and rtt may appear much
-	 * less than real one. Use per-dst memory
-	 * to make it more realistic.
-	 *
-	 * A bit of theory. RTT is time passed after "normal" sized packet
-	 * is sent until it is ACKed. In normal circumstances sending small
-	 * packets force peer to delay ACKs and calculation is correct too.
-	 * The algorithm is adaptive and, provided we follow specs, it
-	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
-	 * tricks sort of "quick acks" for time long enough to decrease RTT
-	 * to low value, and then abruptly stops to do it and starts to delay
-	 * ACKs, wait for troubles.
-	 */
-	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
-		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
-		tp->rtt_seq = tp->snd_nxt;
-	}
-	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
-		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
-		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
-	}
-	tcp_set_rto(sk);
-reset:
-	if (tp->srtt == 0) {
-		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
-		 * 3WHS. This is most likely due to retransmission,
-		 * including spurious one. Reset the RTO back to 3secs
-		 * from the more aggressive 1sec to avoid more spurious
-		 * retransmission.
-		 */
-		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
-		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
-	}
-	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
-	 * retransmitted. In light of RFC6298 more aggressive 1sec
-	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
-	 * retransmission has occurred.
-	 */
-	if (tp->total_retrans > 1)
-		tp->snd_cwnd = 1;
-	else
-		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
 static void tcp_update_reordering(struct sock *sk, const int metric,
 				  const int ts)
 {
@@ -2702,7 +2521,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
 /* Nothing was retransmitted or returned timestamp is less
  * than timestamp of the first retransmission.
  */
-static inline int tcp_packet_delayed(const struct tcp_sock *tp)
+static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 {
 	return !tp->retrans_stamp ||
 		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2763,7 +2582,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static inline int tcp_may_undo(const struct tcp_sock *tp)
+static inline bool tcp_may_undo(const struct tcp_sock *tp)
 {
 	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
 }
@@ -3552,13 +3371,13 @@ static void tcp_ack_probe(struct sock *sk)
 	}
 }
 
-static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
+static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
 {
 	return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
 		inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
 }
 
-static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
+static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
@@ -3568,7 +3387,7 @@ static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 /* Check that window update is acceptable.
  * The function assumes that snd_una<=ack<=snd_next.
  */
-static inline int tcp_may_update_window(const struct tcp_sock *tp,
+static inline bool tcp_may_update_window(const struct tcp_sock *tp,
 					const u32 ack, const u32 ack_seq,
 					const u32 nwin)
 {
@@ -3869,9 +3688,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 			tcp_cong_avoid(sk, ack, prior_in_flight);
 	}
 
-	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
-		dst_confirm(__sk_dst_get(sk));
-
+	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
+		struct dst_entry *dst = __sk_dst_get(sk);
+		if (dst)
+			dst_confirm(dst);
+	}
 	return 1;
 
 no_queue:
@@ -3911,7 +3732,8 @@ old_ack:
  * the fast version below fails.
  */
 void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
-		       const u8 **hvpp, int estab)
+		       const u8 **hvpp, int estab,
+		       struct tcp_fastopen_cookie *foc)
 {
 	const unsigned char *ptr;
 	const struct tcphdr *th = tcp_hdr(skb);
@@ -4018,8 +3840,25 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
 				break;
 				}
 				break;
-			}
 
+		case TCPOPT_EXP:
+			/* Fast Open option shares code 254 using a
+			 * 16 bits magic number. It's valid only in
+			 * SYN or SYN-ACK with an even size.
+			 */
+			if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
+			    get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
+			    foc == NULL || !th->syn || (opsize & 1))
+				break;
+			foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
+			if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
+			    foc->len <= TCP_FASTOPEN_COOKIE_MAX)
+				memcpy(foc->val, ptr + 2, foc->len);
+			else if (foc->len != 0)
+				foc->len = -1;
+			break;
+
+		}
 		ptr += opsize-2;
 		length -= opsize;
 	}
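For reference, the experimental Fast Open option handled by the new TCPOPT_EXP case is laid out on the wire as an option kind of 254, a length byte, a 16-bit magic number, and then the cookie payload. The stand-alone C sketch below mirrors the same validity checks outside the kernel; the literal constant values and the 4..16 byte cookie bounds are assumptions inferred from this hunk rather than part of the patch itself.

/* Illustrative stand-alone parser for the experimental Fast Open option
 * (kind 254 followed by a 16-bit magic), mirroring the checks added above.
 * The constants are assumed to match the kernel's definitions.
 */
#include <stdint.h>
#include <string.h>

#define TCPOPT_EXP                254
#define TCPOPT_FASTOPEN_MAGIC     0xF989
#define TCPOLEN_EXP_FASTOPEN_BASE 4	/* kind + len + 2-byte magic */
#define TCP_FASTOPEN_COOKIE_MIN   4
#define TCP_FASTOPEN_COOKIE_MAX   16

struct tfo_cookie {
	int8_t  len;	/* -1: invalid, 0: cookie request, >0: cookie bytes */
	uint8_t val[TCP_FASTOPEN_COOKIE_MAX];
};

/* opt points at one TCP option (its kind byte); opsize is its length field.
 * Returns 0 when a well-formed Fast Open option was copied into *foc.
 */
static int parse_exp_fastopen(const uint8_t *opt, unsigned int opsize,
			      int is_syn, struct tfo_cookie *foc)
{
	const uint8_t *ptr = opt + 2;	/* skip kind and length bytes */

	if (opt[0] != TCPOPT_EXP || opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
	    !is_syn || (opsize & 1) ||
	    ((ptr[0] << 8) | ptr[1]) != TCPOPT_FASTOPEN_MAGIC)
		return -1;

	foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
	if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
	    foc->len <= TCP_FASTOPEN_COOKIE_MAX)
		memcpy(foc->val, ptr + 2, foc->len);	/* cookie follows the magic */
	else if (foc->len != 0)
		foc->len = -1;	/* malformed cookie length */
	return foc->len >= 0 ? 0 : -1;
}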
@@ -4061,7 +3900,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
 		if (tcp_parse_aligned_timestamp(tp, th))
 			return true;
 	}
-	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
+	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
 	return true;
 }
 
@@ -4167,7 +4006,7 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
 }
 
-static inline int tcp_paws_discard(const struct sock *sk,
+static inline bool tcp_paws_discard(const struct sock *sk,
 				   const struct sk_buff *skb)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
@@ -4189,7 +4028,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
  * (borrowed from freebsd)
  */
 
-static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
+static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
 	return !before(end_seq, tp->rcv_wup) &&
 		!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4512,19 +4351,20 @@ static void tcp_ofo_queue(struct sock *sk)
 static bool tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
-static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+				 unsigned int size)
 {
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	    !sk_rmem_schedule(sk, size)) {
+	    !sk_rmem_schedule(sk, skb, size)) {
 
 		if (tcp_prune_queue(sk) < 0)
 			return -1;
 
-		if (!sk_rmem_schedule(sk, size)) {
+		if (!sk_rmem_schedule(sk, skb, size)) {
 			if (!tcp_prune_ofo_queue(sk))
 				return -1;
 
-			if (!sk_rmem_schedule(sk, size))
+			if (!sk_rmem_schedule(sk, skb, size))
 				return -1;
 		}
 	}
@@ -4579,8 +4419,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
 	TCP_ECN_check_ce(tp, skb);
 
-	if (tcp_try_rmem_schedule(sk, skb->truesize)) {
-		/* TODO: should increment a counter */
+	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
 		__kfree_skb(skb);
 		return;
 	}
@@ -4589,6 +4429,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tp->pred_flags = 0;
 	inet_csk_schedule_ack(sk);
 
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -4642,6 +4483,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
 			/* All the bits are present. Drop. */
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 			__kfree_skb(skb);
 			skb = NULL;
 			tcp_dsack_set(sk, seq, end_seq);
@@ -4680,6 +4522,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		__skb_unlink(skb1, &tp->out_of_order_queue);
 		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
 				 TCP_SKB_CB(skb1)->end_seq);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 		__kfree_skb(skb1);
 	}
 
@@ -4710,17 +4553,17 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
 
 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct tcphdr *th;
 	bool fragstolen;
 
-	if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
-		goto err;
-
 	skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
 	if (!skb)
 		goto err;
 
+	if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
+		goto err_free;
+
 	th = (struct tcphdr *)skb_put(skb, sizeof(*th));
 	skb_reset_transport_header(skb);
 	memset(th, 0, sizeof(*th));
@@ -4791,7 +4634,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		if (eaten <= 0) {
 queue_and_out:
 			if (eaten < 0 &&
-			    tcp_try_rmem_schedule(sk, skb->truesize))
+			    tcp_try_rmem_schedule(sk, skb, skb->truesize))
 				goto drop;
 
 			eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
@@ -5372,7 +5215,7 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk,
 	return result;
 }
 
-static inline int tcp_checksum_complete_user(struct sock *sk,
+static inline bool tcp_checksum_complete_user(struct sock *sk,
 					     struct sk_buff *skb)
 {
 	return !skb_csum_unnecessary(skb) &&
@@ -5426,11 +5269,28 @@ out:
 }
 #endif /* CONFIG_NET_DMA */
 
+static void tcp_send_challenge_ack(struct sock *sk)
+{
+	/* unprotected vars, we dont care of overwrites */
+	static u32 challenge_timestamp;
+	static unsigned int challenge_count;
+	u32 now = jiffies / HZ;
+
+	if (now != challenge_timestamp) {
+		challenge_timestamp = now;
+		challenge_count = 0;
+	}
+	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		tcp_send_ack(sk);
+	}
+}
+
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
-static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 				  const struct tcphdr *th, int syn_inerr)
 {
 	const u8 *hash_location;
 	struct tcp_sock *tp = tcp_sk(sk);
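The tcp_send_challenge_ack() helper added above bounds the number of challenge ACKs to sysctl_tcp_challenge_ack_limit per second by resetting a counter whenever the jiffies / HZ value ticks over. A minimal user-space sketch of the same one-second bucket pattern follows, with time(NULL) standing in for jiffies / HZ and the 100/s default taken from the hunk at the top of this diff.

/* Stand-alone illustration of the per-second rate limit used by
 * tcp_send_challenge_ack(): the counter resets whenever the one-second
 * bucket (time(NULL) here, jiffies / HZ in the kernel) changes.
 */
#include <stdbool.h>
#include <time.h>

static unsigned int challenge_ack_limit = 100;	/* sysctl_tcp_challenge_ack_limit default */

static bool challenge_ack_allowed(void)
{
	static time_t bucket;
	static unsigned int count;
	time_t now = time(NULL);

	if (now != bucket) {		/* new one-second bucket: reset the counter */
		bucket = now;
		count = 0;
	}
	return ++count <= challenge_ack_limit;
}

Leaving the counters unlocked is a deliberate trade-off, as the "we dont care of overwrites" comment in the hunk notes: a lost increment only makes the limit slightly imprecise.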
@@ -5455,14 +5315,26 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 		 * an acknowledgment should be sent in reply (unless the RST
 		 * bit is set, if so drop the segment and return)".
 		 */
-		if (!th->rst)
+		if (!th->rst) {
+			if (th->syn)
+				goto syn_challenge;
 			tcp_send_dupack(sk, skb);
+		}
 		goto discard;
 	}
 
 	/* Step 2: check RST bit */
 	if (th->rst) {
-		tcp_reset(sk);
+		/* RFC 5961 3.2 :
+		 * If sequence number exactly matches RCV.NXT, then
+		 *     RESET the connection
+		 * else
+		 *     Send a challenge ACK
+		 */
+		if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
+			tcp_reset(sk);
+		else
+			tcp_send_challenge_ack(sk);
 		goto discard;
 	}
 
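With RFC 5961 3.2 in place, an in-window RST no longer tears the connection down unconditionally: only a segment whose sequence number exactly matches RCV.NXT resets the socket, while any other in-window RST just elicits a challenge ACK. A condensed sketch of that decision, with the skb and tcp_sock fields abstracted into plain arguments:

/* Decision table for an incoming RST per RFC 5961 3.2, as implemented in
 * the hunk above. The arguments abstract the skb/tcp_sock fields.
 */
enum rst_action { RST_DROP, RST_CHALLENGE_ACK, RST_RESET };

static enum rst_action rst_decision(unsigned int seq, unsigned int rcv_nxt,
				    int in_window)
{
	if (!in_window)			/* out-of-window RSTs were dropped in step 1 */
		return RST_DROP;
	if (seq == rcv_nxt)		/* exact match: reset the connection */
		return RST_RESET;
	return RST_CHALLENGE_ACK;	/* in-window but not exact: challenge ACK */
}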
@@ -5473,20 +5345,23 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 
 	/* step 3: check security and precedence [ignored] */
 
-	/* step 4: Check for a SYN in window. */
-	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+	/* step 4: Check for a SYN
+	 * RFC 5691 4.2 : Send a challenge ack
+	 */
+	if (th->syn) {
+syn_challenge:
 		if (syn_inerr)
 			TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
-		tcp_reset(sk);
-		return -1;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+		tcp_send_challenge_ack(sk);
+		goto discard;
 	}
 
-	return 1;
+	return true;
 
 discard:
 	__kfree_skb(skb);
-	return 0;
+	return false;
 }
 
 /*
@@ -5516,7 +5391,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			const struct tcphdr *th, unsigned int len)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int res;
 
 	/*
 	 *	Header prediction.
@@ -5602,7 +5476,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if (tp->copied_seq == tp->rcv_nxt &&
 			    len - tcp_header_len <= tp->ucopy.len) {
 #ifdef CONFIG_NET_DMA
-				if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+				if (tp->ucopy.task == current &&
+				    sock_owned_by_user(sk) &&
+				    tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
 					copied_early = 1;
 					eaten = 1;
 				}
@@ -5693,9 +5569,8 @@ slow_path:
 	 *	Standard slow path.
 	 */
 
-	res = tcp_validate_incoming(sk, skb, th, 1);
-	if (res <= 0)
-		return -res;
+	if (!tcp_validate_incoming(sk, skb, th, 1))
+		return 0;
 
 step5:
 	if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
@@ -5729,8 +5604,10 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
 	tcp_set_state(sk, TCP_ESTABLISHED);
 
-	if (skb != NULL)
+	if (skb != NULL) {
+		inet_sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
+	}
 
 	/* Make sure socket is routed, for correct metrics. */
 	icsk->icsk_af_ops->rebuild_header(sk);
@@ -5760,6 +5637,45 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	}
 }
 
+static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+				    struct tcp_fastopen_cookie *cookie)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL;
+	u16 mss = tp->rx_opt.mss_clamp;
+	bool syn_drop;
+
+	if (mss == tp->rx_opt.user_mss) {
+		struct tcp_options_received opt;
+		const u8 *hash_location;
+
+		/* Get original SYNACK MSS value if user MSS sets mss_clamp */
+		tcp_clear_options(&opt);
+		opt.user_mss = opt.mss_clamp = 0;
+		tcp_parse_options(synack, &opt, &hash_location, 0, NULL);
+		mss = opt.mss_clamp;
+	}
+
+	if (!tp->syn_fastopen) /* Ignore an unsolicited cookie */
+		cookie->len = -1;
+
+	/* The SYN-ACK neither has cookie nor acknowledges the data. Presumably
+	 * the remote receives only the retransmitted (regular) SYNs: either
+	 * the original SYN-data or the corresponding SYN-ACK is lost.
+	 */
+	syn_drop = (cookie->len <= 0 && data &&
+		    inet_csk(sk)->icsk_retransmits);
+
+	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
+
+	if (data) { /* Retransmit unacked data in SYN */
+		tcp_retransmit_skb(sk, data);
+		tcp_rearm_rto(sk);
+		return true;
+	}
+	return false;
+}
+
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 					 const struct tcphdr *th, unsigned int len)
 {
@@ -5767,9 +5683,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_cookie_values *cvp = tp->cookie_values;
+	struct tcp_fastopen_cookie foc = { .len = -1 };
 	int saved_clamp = tp->rx_opt.mss_clamp;
 
-	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
+	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
 
 	if (th->ack) {
 		/* rfc793:
@@ -5779,11 +5696,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 *    If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
 		 *    a reset (unless the RST bit is set, if so drop
 		 *    the segment and return)"
-		 *
-		 *  We do not send data with SYN, so that RFC-correct
-		 *  test reduces to:
 		 */
-		if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
+		if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
+		    after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
 			goto reset_and_undo;
 
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -5895,6 +5810,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
 		tcp_finish_connect(sk, skb);
 
+		if ((tp->syn_fastopen || tp->syn_data) &&
+		    tcp_rcv_fastopen_synack(sk, skb, &foc))
+			return -1;
+
 		if (sk->sk_write_pending ||
 		    icsk->icsk_accept_queue.rskq_defer_accept ||
 		    icsk->icsk_ack.pingpong) {
@@ -6013,7 +5932,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int queued = 0;
-	int res;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -6068,9 +5986,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		return 0;
 	}
 
-	res = tcp_validate_incoming(sk, skb, th, 0);
-	if (res <= 0)
-		return -res;
+	if (!tcp_validate_incoming(sk, skb, th, 0))
+		return 0;
 
 	/* step 5: check the ACK field */
 	if (th->ack) {
@@ -6126,9 +6043,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	case TCP_FIN_WAIT1:
 		if (tp->snd_una == tp->write_seq) {
+			struct dst_entry *dst;
+
 			tcp_set_state(sk, TCP_FIN_WAIT2);
 			sk->sk_shutdown |= SEND_SHUTDOWN;
-			dst_confirm(__sk_dst_get(sk));
+
+			dst = __sk_dst_get(sk);
+			if (dst)
+				dst_confirm(dst);
 
 			if (!sock_flag(sk, SOCK_DEAD))
 				/* Wake up lingering close() */