diff options
| author | Anton Altaparmakov <aia21@cantab.net> | 2006-01-19 11:39:33 -0500 |
|---|---|---|
| committer | Anton Altaparmakov <aia21@cantab.net> | 2006-01-19 11:39:33 -0500 |
| commit | 944d79559d154c12becde0dab327016cf438f46c (patch) | |
| tree | 50c101806f4d3b6585222dda060559eb4f3e005a /net/ipv4/tcp_input.c | |
| parent | d087e4bdd24ebe3ae3d0b265b6573ec901af4b4b (diff) | |
| parent | 0f36b018b2e314d45af86449f1a97facb1fbe300 (diff) | |
Merge branch 'master' of /usr/src/ntfs-2.6/
Diffstat (limited to 'net/ipv4/tcp_input.c')
| -rw-r--r-- | net/ipv4/tcp_input.c | 101 |
1 files changed, 70 insertions, 31 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index bf2e23086bce..a97ed5416c28 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -115,8 +115,8 @@ int sysctl_tcp_abc = 1; | |||
| 115 | /* Adapt the MSS value used to make delayed ack decision to the | 115 | /* Adapt the MSS value used to make delayed ack decision to the |
| 116 | * real world. | 116 | * real world. |
| 117 | */ | 117 | */ |
| 118 | static inline void tcp_measure_rcv_mss(struct sock *sk, | 118 | static void tcp_measure_rcv_mss(struct sock *sk, |
| 119 | const struct sk_buff *skb) | 119 | const struct sk_buff *skb) |
| 120 | { | 120 | { |
| 121 | struct inet_connection_sock *icsk = inet_csk(sk); | 121 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 122 | const unsigned int lss = icsk->icsk_ack.last_seg_size; | 122 | const unsigned int lss = icsk->icsk_ack.last_seg_size; |
| @@ -246,8 +246,8 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp, | |||
| 246 | return 0; | 246 | return 0; |
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp, | 249 | static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp, |
| 250 | struct sk_buff *skb) | 250 | struct sk_buff *skb) |
| 251 | { | 251 | { |
| 252 | /* Check #1 */ | 252 | /* Check #1 */ |
| 253 | if (tp->rcv_ssthresh < tp->window_clamp && | 253 | if (tp->rcv_ssthresh < tp->window_clamp && |
| @@ -341,6 +341,26 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) | |||
| 341 | tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss); | 341 | tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss); |
| 342 | } | 342 | } |
| 343 | 343 | ||
| 344 | |||
| 345 | /* Initialize RCV_MSS value. | ||
| 346 | * RCV_MSS is our guess about the MSS used by the peer. | ||
| 347 | * We haven't any direct information about the MSS. | ||
| 348 | * It's better to underestimate the RCV_MSS rather than overestimate. | ||
| 349 | * Overestimations make us ACK less frequently than needed. | ||
| 350 | * Underestimations are easier to detect and fix by tcp_measure_rcv_mss(). | ||
| 351 | */ | ||
| 352 | void tcp_initialize_rcv_mss(struct sock *sk) | ||
| 353 | { | ||
| 354 | struct tcp_sock *tp = tcp_sk(sk); | ||
| 355 | unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); | ||
| 356 | |||
| 357 | hint = min(hint, tp->rcv_wnd/2); | ||
| 358 | hint = min(hint, TCP_MIN_RCVMSS); | ||
| 359 | hint = max(hint, TCP_MIN_MSS); | ||
| 360 | |||
| 361 | inet_csk(sk)->icsk_ack.rcv_mss = hint; | ||
| 362 | } | ||
| 363 | |||
| 344 | /* Receiver "autotuning" code. | 364 | /* Receiver "autotuning" code. |
| 345 | * | 365 | * |
| 346 | * The algorithm for RTT estimation w/o timestamps is based on | 366 | * The algorithm for RTT estimation w/o timestamps is based on |
| @@ -735,6 +755,27 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst) | |||
| 735 | return min_t(__u32, cwnd, tp->snd_cwnd_clamp); | 755 | return min_t(__u32, cwnd, tp->snd_cwnd_clamp); |
| 736 | } | 756 | } |
| 737 | 757 | ||
| 758 | /* Set slow start threshold and cwnd not falling to slow start */ | ||
| 759 | void tcp_enter_cwr(struct sock *sk) | ||
| 760 | { | ||
| 761 | struct tcp_sock *tp = tcp_sk(sk); | ||
| 762 | |||
| 763 | tp->prior_ssthresh = 0; | ||
| 764 | tp->bytes_acked = 0; | ||
| 765 | if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { | ||
| 766 | tp->undo_marker = 0; | ||
| 767 | tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); | ||
| 768 | tp->snd_cwnd = min(tp->snd_cwnd, | ||
| 769 | tcp_packets_in_flight(tp) + 1U); | ||
| 770 | tp->snd_cwnd_cnt = 0; | ||
| 771 | tp->high_seq = tp->snd_nxt; | ||
| 772 | tp->snd_cwnd_stamp = tcp_time_stamp; | ||
| 773 | TCP_ECN_queue_cwr(tp); | ||
| 774 | |||
| 775 | tcp_set_ca_state(sk, TCP_CA_CWR); | ||
| 776 | } | ||
| 777 | } | ||
| 778 | |||
| 738 | /* Initialize metrics on socket. */ | 779 | /* Initialize metrics on socket. */ |
| 739 | 780 | ||
| 740 | static void tcp_init_metrics(struct sock *sk) | 781 | static void tcp_init_metrics(struct sock *sk) |
| @@ -2070,8 +2111,8 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, | |||
| 2070 | tcp_ack_no_tstamp(sk, seq_rtt, flag); | 2111 | tcp_ack_no_tstamp(sk, seq_rtt, flag); |
| 2071 | } | 2112 | } |
| 2072 | 2113 | ||
| 2073 | static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, | 2114 | static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, |
| 2074 | u32 in_flight, int good) | 2115 | u32 in_flight, int good) |
| 2075 | { | 2116 | { |
| 2076 | const struct inet_connection_sock *icsk = inet_csk(sk); | 2117 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 2077 | icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good); | 2118 | icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good); |
| @@ -2082,7 +2123,7 @@ static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, | |||
| 2082 | * RFC2988 recommends to restart timer to now+rto. | 2123 | * RFC2988 recommends to restart timer to now+rto. |
| 2083 | */ | 2124 | */ |
| 2084 | 2125 | ||
| 2085 | static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) | 2126 | static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) |
| 2086 | { | 2127 | { |
| 2087 | if (!tp->packets_out) { | 2128 | if (!tp->packets_out) { |
| 2088 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); | 2129 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); |
| @@ -2147,7 +2188,7 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, | |||
| 2147 | return acked; | 2188 | return acked; |
| 2148 | } | 2189 | } |
| 2149 | 2190 | ||
| 2150 | static inline u32 tcp_usrtt(const struct sk_buff *skb) | 2191 | static u32 tcp_usrtt(const struct sk_buff *skb) |
| 2151 | { | 2192 | { |
| 2152 | struct timeval tv, now; | 2193 | struct timeval tv, now; |
| 2153 | 2194 | ||
| @@ -2342,7 +2383,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, | |||
| 2342 | 2383 | ||
| 2343 | if (nwin > tp->max_window) { | 2384 | if (nwin > tp->max_window) { |
| 2344 | tp->max_window = nwin; | 2385 | tp->max_window = nwin; |
| 2345 | tcp_sync_mss(sk, tp->pmtu_cookie); | 2386 | tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); |
| 2346 | } | 2387 | } |
| 2347 | } | 2388 | } |
| 2348 | } | 2389 | } |
| @@ -2583,8 +2624,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
| 2583 | /* Fast parse options. This hopes to only see timestamps. | 2624 | /* Fast parse options. This hopes to only see timestamps. |
| 2584 | * If it is wrong it falls back on tcp_parse_options(). | 2625 | * If it is wrong it falls back on tcp_parse_options(). |
| 2585 | */ | 2626 | */ |
| 2586 | static inline int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, | 2627 | static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, |
| 2587 | struct tcp_sock *tp) | 2628 | struct tcp_sock *tp) |
| 2588 | { | 2629 | { |
| 2589 | if (th->doff == sizeof(struct tcphdr)>>2) { | 2630 | if (th->doff == sizeof(struct tcphdr)>>2) { |
| 2590 | tp->rx_opt.saw_tstamp = 0; | 2631 | tp->rx_opt.saw_tstamp = 0; |
| @@ -2804,8 +2845,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) | |||
| 2804 | } | 2845 | } |
| 2805 | } | 2846 | } |
| 2806 | 2847 | ||
| 2807 | static __inline__ int | 2848 | static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) |
| 2808 | tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) | ||
| 2809 | { | 2849 | { |
| 2810 | if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { | 2850 | if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { |
| 2811 | if (before(seq, sp->start_seq)) | 2851 | if (before(seq, sp->start_seq)) |
| @@ -2817,7 +2857,7 @@ tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) | |||
| 2817 | return 0; | 2857 | return 0; |
| 2818 | } | 2858 | } |
| 2819 | 2859 | ||
| 2820 | static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) | 2860 | static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) |
| 2821 | { | 2861 | { |
| 2822 | if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { | 2862 | if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { |
| 2823 | if (before(seq, tp->rcv_nxt)) | 2863 | if (before(seq, tp->rcv_nxt)) |
| @@ -2832,7 +2872,7 @@ static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) | |||
| 2832 | } | 2872 | } |
| 2833 | } | 2873 | } |
| 2834 | 2874 | ||
| 2835 | static inline void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq) | 2875 | static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq) |
| 2836 | { | 2876 | { |
| 2837 | if (!tp->rx_opt.dsack) | 2877 | if (!tp->rx_opt.dsack) |
| 2838 | tcp_dsack_set(tp, seq, end_seq); | 2878 | tcp_dsack_set(tp, seq, end_seq); |
| @@ -2890,7 +2930,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) | |||
| 2890 | } | 2930 | } |
| 2891 | } | 2931 | } |
| 2892 | 2932 | ||
| 2893 | static __inline__ void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2) | 2933 | static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2) |
| 2894 | { | 2934 | { |
| 2895 | __u32 tmp; | 2935 | __u32 tmp; |
| 2896 | 2936 | ||
| @@ -3307,7 +3347,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
| 3307 | int offset = start - TCP_SKB_CB(skb)->seq; | 3347 | int offset = start - TCP_SKB_CB(skb)->seq; |
| 3308 | int size = TCP_SKB_CB(skb)->end_seq - start; | 3348 | int size = TCP_SKB_CB(skb)->end_seq - start; |
| 3309 | 3349 | ||
| 3310 | if (offset < 0) BUG(); | 3350 | BUG_ON(offset < 0); |
| 3311 | if (size > 0) { | 3351 | if (size > 0) { |
| 3312 | size = min(copy, size); | 3352 | size = min(copy, size); |
| 3313 | if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) | 3353 | if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) |
| @@ -3455,7 +3495,7 @@ void tcp_cwnd_application_limited(struct sock *sk) | |||
| 3455 | tp->snd_cwnd_stamp = tcp_time_stamp; | 3495 | tp->snd_cwnd_stamp = tcp_time_stamp; |
| 3456 | } | 3496 | } |
| 3457 | 3497 | ||
| 3458 | static inline int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp) | 3498 | static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp) |
| 3459 | { | 3499 | { |
| 3460 | /* If the user specified a specific send buffer setting, do | 3500 | /* If the user specified a specific send buffer setting, do |
| 3461 | * not modify it. | 3501 | * not modify it. |
| @@ -3502,7 +3542,7 @@ static void tcp_new_space(struct sock *sk) | |||
| 3502 | sk->sk_write_space(sk); | 3542 | sk->sk_write_space(sk); |
| 3503 | } | 3543 | } |
| 3504 | 3544 | ||
| 3505 | static inline void tcp_check_space(struct sock *sk) | 3545 | static void tcp_check_space(struct sock *sk) |
| 3506 | { | 3546 | { |
| 3507 | if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { | 3547 | if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { |
| 3508 | sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); | 3548 | sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); |
| @@ -3512,7 +3552,7 @@ static inline void tcp_check_space(struct sock *sk) | |||
| 3512 | } | 3552 | } |
| 3513 | } | 3553 | } |
| 3514 | 3554 | ||
| 3515 | static __inline__ void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp) | 3555 | static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp) |
| 3516 | { | 3556 | { |
| 3517 | tcp_push_pending_frames(sk, tp); | 3557 | tcp_push_pending_frames(sk, tp); |
| 3518 | tcp_check_space(sk); | 3558 | tcp_check_space(sk); |
| @@ -3544,7 +3584,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) | |||
| 3544 | } | 3584 | } |
| 3545 | } | 3585 | } |
| 3546 | 3586 | ||
| 3547 | static __inline__ void tcp_ack_snd_check(struct sock *sk) | 3587 | static inline void tcp_ack_snd_check(struct sock *sk) |
| 3548 | { | 3588 | { |
| 3549 | if (!inet_csk_ack_scheduled(sk)) { | 3589 | if (!inet_csk_ack_scheduled(sk)) { |
| 3550 | /* We sent a data segment already. */ | 3590 | /* We sent a data segment already. */ |
| @@ -3692,8 +3732,7 @@ static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) | |||
| 3692 | return result; | 3732 | return result; |
| 3693 | } | 3733 | } |
| 3694 | 3734 | ||
| 3695 | static __inline__ int | 3735 | static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) |
| 3696 | tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) | ||
| 3697 | { | 3736 | { |
| 3698 | return skb->ip_summed != CHECKSUM_UNNECESSARY && | 3737 | return skb->ip_summed != CHECKSUM_UNNECESSARY && |
| 3699 | __tcp_checksum_complete_user(sk, skb); | 3738 | __tcp_checksum_complete_user(sk, skb); |
| @@ -3967,12 +4006,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 3967 | struct tcphdr *th, unsigned len) | 4006 | struct tcphdr *th, unsigned len) |
| 3968 | { | 4007 | { |
| 3969 | struct tcp_sock *tp = tcp_sk(sk); | 4008 | struct tcp_sock *tp = tcp_sk(sk); |
| 4009 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
| 3970 | int saved_clamp = tp->rx_opt.mss_clamp; | 4010 | int saved_clamp = tp->rx_opt.mss_clamp; |
| 3971 | 4011 | ||
| 3972 | tcp_parse_options(skb, &tp->rx_opt, 0); | 4012 | tcp_parse_options(skb, &tp->rx_opt, 0); |
| 3973 | 4013 | ||
| 3974 | if (th->ack) { | 4014 | if (th->ack) { |
| 3975 | struct inet_connection_sock *icsk; | ||
| 3976 | /* rfc793: | 4015 | /* rfc793: |
| 3977 | * "If the state is SYN-SENT then | 4016 | * "If the state is SYN-SENT then |
| 3978 | * first check the ACK bit | 4017 | * first check the ACK bit |
| @@ -4061,7 +4100,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 4061 | if (tp->rx_opt.sack_ok && sysctl_tcp_fack) | 4100 | if (tp->rx_opt.sack_ok && sysctl_tcp_fack) |
| 4062 | tp->rx_opt.sack_ok |= 2; | 4101 | tp->rx_opt.sack_ok |= 2; |
| 4063 | 4102 | ||
| 4064 | tcp_sync_mss(sk, tp->pmtu_cookie); | 4103 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); |
| 4065 | tcp_initialize_rcv_mss(sk); | 4104 | tcp_initialize_rcv_mss(sk); |
| 4066 | 4105 | ||
| 4067 | /* Remember, tcp_poll() does not lock socket! | 4106 | /* Remember, tcp_poll() does not lock socket! |
| @@ -4072,7 +4111,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 4072 | tcp_set_state(sk, TCP_ESTABLISHED); | 4111 | tcp_set_state(sk, TCP_ESTABLISHED); |
| 4073 | 4112 | ||
| 4074 | /* Make sure socket is routed, for correct metrics. */ | 4113 | /* Make sure socket is routed, for correct metrics. */ |
| 4075 | tp->af_specific->rebuild_header(sk); | 4114 | icsk->icsk_af_ops->rebuild_header(sk); |
| 4076 | 4115 | ||
| 4077 | tcp_init_metrics(sk); | 4116 | tcp_init_metrics(sk); |
| 4078 | 4117 | ||
| @@ -4098,8 +4137,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 4098 | sk_wake_async(sk, 0, POLL_OUT); | 4137 | sk_wake_async(sk, 0, POLL_OUT); |
| 4099 | } | 4138 | } |
| 4100 | 4139 | ||
| 4101 | icsk = inet_csk(sk); | ||
| 4102 | |||
| 4103 | if (sk->sk_write_pending || | 4140 | if (sk->sk_write_pending || |
| 4104 | icsk->icsk_accept_queue.rskq_defer_accept || | 4141 | icsk->icsk_accept_queue.rskq_defer_accept || |
| 4105 | icsk->icsk_ack.pingpong) { | 4142 | icsk->icsk_ack.pingpong) { |
| @@ -4173,7 +4210,7 @@ discard: | |||
| 4173 | if (tp->ecn_flags&TCP_ECN_OK) | 4210 | if (tp->ecn_flags&TCP_ECN_OK) |
| 4174 | sock_set_flag(sk, SOCK_NO_LARGESEND); | 4211 | sock_set_flag(sk, SOCK_NO_LARGESEND); |
| 4175 | 4212 | ||
| 4176 | tcp_sync_mss(sk, tp->pmtu_cookie); | 4213 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); |
| 4177 | tcp_initialize_rcv_mss(sk); | 4214 | tcp_initialize_rcv_mss(sk); |
| 4178 | 4215 | ||
| 4179 | 4216 | ||
| @@ -4220,6 +4257,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 4220 | struct tcphdr *th, unsigned len) | 4257 | struct tcphdr *th, unsigned len) |
| 4221 | { | 4258 | { |
| 4222 | struct tcp_sock *tp = tcp_sk(sk); | 4259 | struct tcp_sock *tp = tcp_sk(sk); |
| 4260 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
| 4223 | int queued = 0; | 4261 | int queued = 0; |
| 4224 | 4262 | ||
| 4225 | tp->rx_opt.saw_tstamp = 0; | 4263 | tp->rx_opt.saw_tstamp = 0; |
| @@ -4236,7 +4274,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 4236 | goto discard; | 4274 | goto discard; |
| 4237 | 4275 | ||
| 4238 | if(th->syn) { | 4276 | if(th->syn) { |
| 4239 | if(tp->af_specific->conn_request(sk, skb) < 0) | 4277 | if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) |
| 4240 | return 1; | 4278 | return 1; |
| 4241 | 4279 | ||
| 4242 | /* Now we have several options: In theory there is | 4280 | /* Now we have several options: In theory there is |
| @@ -4349,7 +4387,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 4349 | /* Make sure socket is routed, for | 4387 | /* Make sure socket is routed, for |
| 4350 | * correct metrics. | 4388 | * correct metrics. |
| 4351 | */ | 4389 | */ |
| 4352 | tp->af_specific->rebuild_header(sk); | 4390 | icsk->icsk_af_ops->rebuild_header(sk); |
| 4353 | 4391 | ||
| 4354 | tcp_init_metrics(sk); | 4392 | tcp_init_metrics(sk); |
| 4355 | 4393 | ||
| @@ -4475,3 +4513,4 @@ EXPORT_SYMBOL(sysctl_tcp_abc); | |||
| 4475 | EXPORT_SYMBOL(tcp_parse_options); | 4513 | EXPORT_SYMBOL(tcp_parse_options); |
| 4476 | EXPORT_SYMBOL(tcp_rcv_established); | 4514 | EXPORT_SYMBOL(tcp_rcv_established); |
| 4477 | EXPORT_SYMBOL(tcp_rcv_state_process); | 4515 | EXPORT_SYMBOL(tcp_rcv_state_process); |
| 4516 | EXPORT_SYMBOL(tcp_initialize_rcv_mss); | ||
