Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--  include/net/tcp.h | 246
1 file changed, 31 insertions(+), 215 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d78025f9fbea..77f21c65bbca 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -225,53 +225,6 @@ extern atomic_t tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
 /*
- * Pointers to address related TCP functions
- * (i.e. things that depend on the address family)
- */
-
-struct tcp_func {
-	int			(*queue_xmit)		(struct sk_buff *skb,
-							 int ipfragok);
-
-	void			(*send_check)		(struct sock *sk,
-							 struct tcphdr *th,
-							 int len,
-							 struct sk_buff *skb);
-
-	int			(*rebuild_header)	(struct sock *sk);
-
-	int			(*conn_request)		(struct sock *sk,
-							 struct sk_buff *skb);
-
-	struct sock *		(*syn_recv_sock)	(struct sock *sk,
-							 struct sk_buff *skb,
-							 struct request_sock *req,
-							 struct dst_entry *dst);
-
-	int			(*remember_stamp)	(struct sock *sk);
-
-	__u16			net_header_len;
-
-	int			(*setsockopt)		(struct sock *sk,
-							 int level,
-							 int optname,
-							 char __user *optval,
-							 int optlen);
-
-	int			(*getsockopt)		(struct sock *sk,
-							 int level,
-							 int optname,
-							 char __user *optval,
-							 int __user *optlen);
-
-
-	void			(*addr2sockaddr)	(struct sock *sk,
-							 struct sockaddr *);
-
-	int sockaddr_len;
-};
-
-/*
  * The next routines deal with comparing 32 bit unsigned ints
  * and worry about wraparound (automatic with unsigned arithmetic).
  */
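The struct removed above was TCP's per-address-family virtual table: a socket carried a pointer to one of these, and every address-specific step (transmit, checksum, connection setup, sockopts) was dispatched through it, with tcp_v4_* and tcp_v6_* implementations filling the slots. A minimal sketch of that dispatch pattern, using hypothetical names (af_ops, v4_queue_xmit, xmit_one) rather than the kernel's replacement type, which this hunk does not show:

	/* Sketch only -- illustrative, not the kernel's actual API. */
	struct sk_buff;				/* opaque for this sketch */

	struct af_ops {
		int (*queue_xmit)(struct sk_buff *skb, int ipfragok);
		unsigned short net_header_len;	/* e.g. 20 for IPv4 */
	};

	static int v4_queue_xmit(struct sk_buff *skb, int ipfragok)
	{
		/* IPv4-specific transmit would live here. */
		return 0;
	}

	static const struct af_ops v4_ops = {
		.queue_xmit	= v4_queue_xmit,
		.net_header_len	= 20,
	};

	/* A socket picks its table once at creation, then calls indirectly: */
	static int xmit_one(const struct af_ops *ops, struct sk_buff *skb)
	{
		return ops->queue_xmit(skb, 0);	/* 0: may not fragment */
	}

	int main(void)
	{
		return xmit_one(&v4_ops, 0);	/* null skb: sketch only */
	}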
@@ -334,6 +287,9 @@ extern int tcp_rcv_established(struct sock *sk,
 
 extern void tcp_rcv_space_adjust(struct sock *sk);
 
+extern int tcp_twsk_unique(struct sock *sk,
+			   struct sock *sktw, void *twp);
+
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
@@ -405,8 +361,7 @@ extern void tcp_parse_options(struct sk_buff *skb,
  * TCP v4 functions exported for the inet6 API
  */
 
-extern void tcp_v4_send_check(struct sock *sk,
-			      struct tcphdr *th, int len,
+extern void tcp_v4_send_check(struct sock *sk, int len,
 			      struct sk_buff *skb);
 
 extern int tcp_v4_conn_request(struct sock *sk,
@@ -490,34 +445,16 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			 sk_read_actor_t recv_actor);
 
-/* Initialize RCV_MSS value.
- * RCV_MSS is an our guess about MSS used by the peer.
- * We haven't any direct information about the MSS.
- * It's better to underestimate the RCV_MSS rather than overestimate.
- * Overestimations make us ACKing less frequently than needed.
- * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss().
- */
+extern void tcp_initialize_rcv_mss(struct sock *sk);
 
-static inline void tcp_initialize_rcv_mss(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
-
-	hint = min(hint, tp->rcv_wnd/2);
-	hint = min(hint, TCP_MIN_RCVMSS);
-	hint = max(hint, TCP_MIN_MSS);
-
-	inet_csk(sk)->icsk_ack.rcv_mss = hint;
-}
-
-static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 {
 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 			       ntohl(TCP_FLAG_ACK) |
 			       snd_wnd);
 }
 
-static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
+static inline void tcp_fast_path_on(struct tcp_sock *tp)
 {
 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 }
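__tcp_fast_path_on, kept inline above, precomputes the "prediction flags": the value the fourth 32-bit word of an incoming TCP header must have (data offset, ACK flag, window) for the segment to take the receive fast path. A worked example of the arithmetic with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t tcp_header_len = 32;		/* bytes; 20 + 12 for timestamps */
		uint32_t ack		= 0x00100000;	/* ACK bit of the flags word */
		uint32_t snd_wnd	= 0x7fff;	/* window, already descaled */

		/* Same arithmetic as __tcp_fast_path_on(), before the htonl():
		 * a 32-byte header is 8 words, and 32 << 26 == 8 << 28 puts
		 * the data offset in the top four bits. */
		uint32_t pred = (tcp_header_len << 26) | ack | snd_wnd;
		printf("pred_flags = 0x%08x\n", pred);	/* prints 0x80107fff */
		return 0;
	}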
@@ -535,7 +472,7 @@ static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
  * Rcv_nxt can be after the window if our peer push more data
  * than the offered window.
  */
-static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
+static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 {
 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 
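tcp_receive_window computes how much of the last advertised window is still usable: rcv_wup is rcv_nxt at the moment the window was advertised, so rcv_wup + rcv_wnd is the right edge of the offer. The signed s32 matters because, as the comment says, the peer may push past the offer; a walk-through with made-up numbers (the clamp to zero happens in the part of the function this hunk does not show):

	#include <assert.h>

	int main(void)
	{
		int rcv_wup = 1000;	/* rcv_nxt when the window was sent */
		int rcv_wnd = 5000;	/* window advertised at that point  */
		int rcv_nxt = 4000;	/* next sequence expected now       */

		int win = rcv_wup + rcv_wnd - rcv_nxt;
		assert(win == 2000);	/* 2000 bytes of the offer remain   */

		rcv_nxt = 6500;		/* peer pushed past the offer...    */
		win = rcv_wup + rcv_wnd - rcv_nxt;
		assert(win == -500);	/* ...signed result goes negative   */
		return 0;		/* and is clamped to 0 later on     */
	}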
@@ -707,6 +644,7 @@ extern void tcp_cleanup_congestion_control(struct sock *sk);
 extern int tcp_set_default_congestion_control(const char *name);
 extern void tcp_get_default_congestion_control(char *name);
 extern int tcp_set_congestion_control(struct sock *sk, const char *name);
+extern void tcp_slow_start(struct tcp_sock *tp);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
@@ -746,7 +684,7 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
  * "Packets left network, but not honestly ACKed yet" PLUS
  * "Packets fast retransmitted"
  */
-static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 {
 	return (tp->packets_out - tp->left_out + tp->retrans_out);
 }
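The comment above is the whole story: left_out counts segments that left the network (SACKed or presumed lost), so in-flight is what was sent, minus what left, plus what was re-sent. With made-up counters:

	#include <assert.h>

	int main(void)
	{
		unsigned packets_out = 10;	/* sent, not yet fully ACKed     */
		unsigned left_out    = 3;	/* sacked_out + lost_out         */
		unsigned retrans_out = 1;	/* retransmitted and outstanding */

		assert(packets_out - left_out + retrans_out == 8);
		return 0;
	}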
@@ -766,33 +704,6 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 			    (tp->snd_cwnd >> 2)));
 }
 
-/*
- * Linear increase during slow start
- */
-static inline void tcp_slow_start(struct tcp_sock *tp)
-{
-	if (sysctl_tcp_abc) {
-		/* RFC3465: Slow Start
-		 * TCP sender SHOULD increase cwnd by the number of
-		 * previously unacknowledged bytes ACKed by each incoming
-		 * acknowledgment, provided the increase is not more than L
-		 */
-		if (tp->bytes_acked < tp->mss_cache)
-			return;
-
-		/* We MAY increase by 2 if discovered delayed ack */
-		if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-		}
-	}
-	tp->bytes_acked = 0;
-
-	if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-		tp->snd_cwnd++;
-}
-
-
 static inline void tcp_sync_left_out(struct tcp_sock *tp)
 {
 	if (tp->rx_opt.sack_ok &&
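tcp_slow_start (moved out of line above; its extern appears earlier in this diff) grows cwnd by one segment per ACK, unless Appropriate Byte Counting (sysctl_tcp_abc, RFC 3465) is enabled, in which case growth waits until a full MSS worth of new bytes has been ACKed, so tiny or stretched ACKs no longer inflate cwnd. A toy userspace model of the ABC gate for the sysctl_tcp_abc == 1 case, with made-up ACK sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned mss = 1460, cwnd = 2, clamp = 0xffff;
		unsigned bytes_acked = 0;
		unsigned acked[] = { 500, 500, 500, 1460, 2920 };  /* per ACK */

		for (int i = 0; i < 5; i++) {
			bytes_acked += acked[i];
			if (bytes_acked < mss)
				continue;	/* ABC: wait for a full MSS   */
			bytes_acked = 0;
			if (cwnd < clamp)
				cwnd++;		/* three small ACKs yield one */
			printf("ack %d: cwnd=%u\n", i, cwnd);	/* increase   */
		}
		return 0;
	}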
@@ -801,34 +712,7 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
 	tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
-/* Set slow start threshold and cwnd not falling to slow start */
-static inline void __tcp_enter_cwr(struct sock *sk)
-{
-	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	tp->undo_marker = 0;
-	tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
-	tp->snd_cwnd = min(tp->snd_cwnd,
-			   tcp_packets_in_flight(tp) + 1U);
-	tp->snd_cwnd_cnt = 0;
-	tp->high_seq = tp->snd_nxt;
-	tp->snd_cwnd_stamp = tcp_time_stamp;
-	TCP_ECN_queue_cwr(tp);
-}
-
-static inline void tcp_enter_cwr(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	tp->prior_ssthresh = 0;
-	tp->bytes_acked = 0;
-	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
-		__tcp_enter_cwr(sk);
-		tcp_set_ca_state(sk, TCP_CA_CWR);
-	}
-}
-
+extern void tcp_enter_cwr(struct sock *sk);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
 
 /* Slow start with delack produces 3 packets of burst, so that
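The deleted helpers (now behind the extern above) capture entry into the Congestion Window Reduced state: ssthresh is taken from the congestion-control ops, and cwnd is clamped to in-flight + 1 so that leaving CWR cannot release a burst. Plugging in toy numbers, and assuming Reno's ssthresh of max(cwnd/2, 2):

	#include <assert.h>

	static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }
	static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

	int main(void)
	{
		unsigned snd_cwnd = 10, in_flight = 8;

		unsigned snd_ssthresh = max_u(snd_cwnd >> 1, 2);  /* Reno: 5 */
		snd_cwnd = min_u(snd_cwnd, in_flight + 1);	  /* 9, no burst */

		assert(snd_ssthresh == 5 && snd_cwnd == 9);
		return 0;
	}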
@@ -860,14 +744,14 @@ static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	return left <= tcp_max_burst(tp);
 }
 
-static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
-					   const struct sk_buff *skb)
+static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
+				       const struct sk_buff *skb)
 {
 	if (skb->len < mss)
 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 }
 
-static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	if (!tp->packets_out && !icsk->icsk_pending)
@@ -875,18 +759,18 @@ static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *t
 					  icsk->icsk_rto, TCP_RTO_MAX);
 }
 
-static __inline__ void tcp_push_pending_frames(struct sock *sk,
-					       struct tcp_sock *tp)
+static inline void tcp_push_pending_frames(struct sock *sk,
+					   struct tcp_sock *tp)
 {
 	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
 }
 
-static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 {
 	tp->snd_wl1 = seq;
 }
 
-static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 {
 	tp->snd_wl1 = seq;
 }
@@ -894,19 +778,19 @@ static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 /*
  * Calculate(/check) TCP checksum
  */
-static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
-				   unsigned long saddr, unsigned long daddr,
-				   unsigned long base)
+static inline u16 tcp_v4_check(struct tcphdr *th, int len,
+			       unsigned long saddr, unsigned long daddr,
+			       unsigned long base)
 {
 	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
 }
 
-static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
+static inline int __tcp_checksum_complete(struct sk_buff *skb)
 {
 	return __skb_checksum_complete(skb);
 }
 
-static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
+static inline int tcp_checksum_complete(struct sk_buff *skb)
 {
 	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
 		__tcp_checksum_complete(skb);
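tcp_v4_check and csum_tcpudp_magic fold the TCP pseudo-header into the checksum: source and destination address, protocol, and TCP length are summed alongside the segment itself ('base' carries the partial sum of header and payload). A simplified userspace rendering of the pseudo-header part, using one's-complement addition; the addresses are made up:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t csum_fold(uint32_t sum)
	{
		while (sum >> 16)			/* fold carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		uint32_t saddr = 0xc0a80001;		/* 192.168.0.1 */
		uint32_t daddr = 0xc0a80002;		/* 192.168.0.2 */
		uint32_t proto = 6, len = 20;		/* IPPROTO_TCP, TCP length */
		uint32_t sum = 0;

		sum += (saddr >> 16) + (saddr & 0xffff);
		sum += (daddr >> 16) + (daddr & 0xffff);
		sum += proto + len;
		/* ...the real sum also folds in the segment itself ('base'). */
		printf("pseudo-header checksum: 0x%04x\n", csum_fold(sum));
		return 0;
	}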
@@ -914,7 +798,7 @@ static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
 
 /* Prequeue for VJ style copy to user, combined with checksumming. */
 
-static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
+static inline void tcp_prequeue_init(struct tcp_sock *tp)
 {
 	tp->ucopy.task = NULL;
 	tp->ucopy.len = 0;
@@ -930,7 +814,7 @@ static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
  *
  * NOTE: is this not too big to inline?
  */
-static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -971,7 +855,7 @@ static const char *statename[]={
 };
 #endif
 
-static __inline__ void tcp_set_state(struct sock *sk, int state)
+static inline void tcp_set_state(struct sock *sk, int state)
 {
 	int oldstate = sk->sk_state;
 
@@ -1005,7 +889,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
 #endif
 }
 
-static __inline__ void tcp_done(struct sock *sk)
+static inline void tcp_done(struct sock *sk)
 {
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
@@ -1018,81 +902,13 @@ static __inline__ void tcp_done(struct sock *sk)
 	inet_csk_destroy_sock(sk);
 }
 
-static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
+static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
 	rx_opt->dsack = 0;
 	rx_opt->eff_sacks = 0;
 	rx_opt->num_sacks = 0;
 }
 
-static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
-{
-	if (tp->rx_opt.tstamp_ok) {
-		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
-					  (TCPOPT_NOP << 16) |
-					  (TCPOPT_TIMESTAMP << 8) |
-					  TCPOLEN_TIMESTAMP);
-		*ptr++ = htonl(tstamp);
-		*ptr++ = htonl(tp->rx_opt.ts_recent);
-	}
-	if (tp->rx_opt.eff_sacks) {
-		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
-		int this_sack;
-
-		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
-					  (TCPOPT_NOP << 16) |
-					  (TCPOPT_SACK << 8) |
-					  (TCPOLEN_SACK_BASE +
-					   (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
-		for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
-			*ptr++ = htonl(sp[this_sack].start_seq);
-			*ptr++ = htonl(sp[this_sack].end_seq);
-		}
-		if (tp->rx_opt.dsack) {
-			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks--;
-		}
-	}
-}
-
-/* Construct a tcp options header for a SYN or SYN_ACK packet.
- * If this is every changed make sure to change the definition of
- * MAX_SYN_SIZE to match the new maximum number of options that you
- * can generate.
- */
-static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
-					 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
-{
-	/* We always get an MSS option.
-	 * The option bytes which will be seen in normal data
-	 * packets should timestamps be used, must be in the MSS
-	 * advertised. But we subtract them from tp->mss_cache so
-	 * that calculations in tcp_sendmsg are simpler etc.
-	 * So account for this fact here if necessary. If we
-	 * don't do this correctly, as a receiver we won't
-	 * recognize data packets as being full sized when we
-	 * should, and thus we won't abide by the delayed ACK
-	 * rules correctly.
-	 * SACKs don't matter, we never delay an ACK when we
-	 * have any of those going out.
-	 */
-	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
-	if (ts) {
-		if(sack)
-			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
-						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
-		else
-			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
-		*ptr++ = htonl(tstamp);		/* TSVAL */
-		*ptr++ = htonl(ts_recent);	/* TSECR */
-	} else if(sack)
-		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
-	if (offer_wscale)
-		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
-}
-
 /* Determine a window scaling and initial window to offer. */
 extern void tcp_select_initial_window(int __space, __u32 mss,
 				      __u32 *rcv_wnd, __u32 *window_clamp,
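The two option builders removed above pack TCP options into 32-bit words, padding with NOPs so everything stays aligned; for instance, the 10-byte timestamp option is prefixed with two NOPs to fill exactly three words. Checking one of those constants by hand (TCPOPT_NOP=1, TCPOPT_TIMESTAMP=8, TCPOLEN_TIMESTAMP=10):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t word = (1u << 24) | (1u << 16) | (8u << 8) | 10u;

		/* On the wire: 01 01 08 0a -- NOP, NOP, kind=8, len=10,
		 * followed by the two 4-byte TSval/TSecr words. */
		assert(word == 0x0101080a);
		return 0;
	}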
@@ -1117,9 +933,9 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static __inline__ void tcp_openreq_init(struct request_sock *req,
-					struct tcp_options_received *rx_opt,
-					struct sk_buff *skb)
+static inline void tcp_openreq_init(struct request_sock *req,
+				    struct tcp_options_received *rx_opt,
+				    struct sk_buff *skb)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 
