Diffstat (limited to 'include/net/tcp.h')
 include/net/tcp.h | 193 ++++++------------------------------------------
 1 file changed, 27 insertions(+), 166 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 369930497401..77f21c65bbca 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -445,34 +445,16 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 extern int	tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			      sk_read_actor_t recv_actor);
 
-/* Initialize RCV_MSS value.
- * RCV_MSS is our guess about the MSS used by the peer.
- * We have no direct information about the MSS.
- * It is better to underestimate RCV_MSS than to overestimate it:
- * overestimating makes us ACK less frequently than needed, while
- * underestimates are easier to detect and fix by tcp_measure_rcv_mss().
- */
+extern void tcp_initialize_rcv_mss(struct sock *sk);
 
-static inline void tcp_initialize_rcv_mss(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
-
-	hint = min(hint, tp->rcv_wnd/2);
-	hint = min(hint, TCP_MIN_RCVMSS);
-	hint = max(hint, TCP_MIN_MSS);
-
-	inet_csk(sk)->icsk_ack.rcv_mss = hint;
-}
-
-static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 {
 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 			       ntohl(TCP_FLAG_ACK) |
 			       snd_wnd);
 }
 
-static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
+static inline void tcp_fast_path_on(struct tcp_sock *tp)
 {
 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 }
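
The body removed above must reappear out of line somewhere; this header-only diff does not show where. A sketch of the uninlined definition, assuming a verbatim move (net/ipv4/tcp_input.c would be a natural home, but that is an assumption):

	void tcp_initialize_rcv_mss(struct sock *sk)
	{
		struct tcp_sock *tp = tcp_sk(sk);
		unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

		/* RCV_MSS is our guess at the peer's MSS; err low, since an
		 * overestimate delays ACKs while an underestimate is corrected
		 * by tcp_measure_rcv_mss().
		 */
		hint = min(hint, tp->rcv_wnd/2);
		hint = min(hint, TCP_MIN_RCVMSS);
		hint = max(hint, TCP_MIN_MSS);

		inet_csk(sk)->icsk_ack.rcv_mss = hint;
	}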
@@ -490,7 +472,7 @@ static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
  * Rcv_nxt can be after the window if our peer pushes more data
  * than the offered window.
  */
-static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
+static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 {
 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 
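
A worked example of why `win` is signed: with rcv_wup = 1000 and rcv_wnd = 500, a peer that has pushed through to rcv_nxt = 1600 yields win = -100. The rest of the function falls outside this hunk's context; presumably it clamps negative values to zero, along these lines:

	if (win < 0)
		win = 0;	/* assumed clamp; not visible in this hunk */
	return (u32) win;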
@@ -662,6 +644,7 @@ extern void tcp_cleanup_congestion_control(struct sock *sk);
 extern int tcp_set_default_congestion_control(const char *name);
 extern void tcp_get_default_congestion_control(char *name);
 extern int tcp_set_congestion_control(struct sock *sk, const char *name);
+extern void tcp_slow_start(struct tcp_sock *tp);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
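
With tcp_slow_start() exported as an extern, every pluggable congestion-control module can share one copy of the slow-start logic instead of inlining it. A hypothetical cong_avoid hook (the module name and signature details are illustrative, not taken from this diff):

	static void example_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
				       u32 in_flight, int flag)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		if (!tcp_is_cwnd_limited(sk, in_flight))
			return;

		if (tp->snd_cwnd <= tp->snd_ssthresh)
			tcp_slow_start(tp);	/* the newly out-of-line helper */
		else if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;		/* simple linear growth */
	}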
@@ -701,7 +684,7 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
  * "Packets left network, but not honestly ACKed yet" PLUS
  * "Packets fast retransmitted"
  */
-static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 {
 	return (tp->packets_out - tp->left_out + tp->retrans_out);
 }
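
To make the bookkeeping concrete: with packets_out = 10, left_out = 3 (say two SACKed segments plus one marked lost) and retrans_out = 2, tcp_packets_in_flight() returns 10 - 3 + 2 = 9. The SACKed and lost segments have left the network, while the two retransmissions re-entered it.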
@@ -721,33 +704,6 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 			   (tp->snd_cwnd >> 2)));
 }
 
-/*
- * Linear increase during slow start
- */
-static inline void tcp_slow_start(struct tcp_sock *tp)
-{
-	if (sysctl_tcp_abc) {
-		/* RFC3465: Slow Start
-		 * The TCP sender SHOULD increase cwnd by the number of
-		 * previously unacknowledged bytes ACKed by each incoming
-		 * acknowledgment, provided the increase is not more than L.
-		 */
-		if (tp->bytes_acked < tp->mss_cache)
-			return;
-
-		/* We MAY increase by 2 if we discovered a delayed ACK */
-		if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-		}
-	}
-	tp->bytes_acked = 0;
-
-	if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-		tp->snd_cwnd++;
-}
-
-
 static inline void tcp_sync_left_out(struct tcp_sock *tp)
 {
 	if (tp->rx_opt.sack_ok &&
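
The removed slow-start body presumably reappears out of line next to the other congestion-control helpers (net/ipv4/tcp_cong.c would fit, though this header-only diff cannot confirm it). A sketch assuming a verbatim move:

	void tcp_slow_start(struct tcp_sock *tp)
	{
		if (sysctl_tcp_abc) {
			/* RFC 3465: grow cwnd per MSS worth of ACKed bytes,
			 * not per ACK.
			 */
			if (tp->bytes_acked < tp->mss_cache)
				return;

			/* We MAY increase by 2 if we discovered a delayed ACK */
			if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
				if (tp->snd_cwnd < tp->snd_cwnd_clamp)
					tp->snd_cwnd++;
			}
		}
		tp->bytes_acked = 0;

		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}
	EXPORT_SYMBOL_GPL(tcp_slow_start);	/* assumed: modules call it */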
@@ -756,34 +712,7 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
 	tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
-/* Set slow start threshold and cwnd, without dropping back to slow start */
-static inline void __tcp_enter_cwr(struct sock *sk)
-{
-	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	tp->undo_marker = 0;
-	tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
-	tp->snd_cwnd = min(tp->snd_cwnd,
-			   tcp_packets_in_flight(tp) + 1U);
-	tp->snd_cwnd_cnt = 0;
-	tp->high_seq = tp->snd_nxt;
-	tp->snd_cwnd_stamp = tcp_time_stamp;
-	TCP_ECN_queue_cwr(tp);
-}
-
-static inline void tcp_enter_cwr(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	tp->prior_ssthresh = 0;
-	tp->bytes_acked = 0;
-	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
-		__tcp_enter_cwr(sk);
-		tcp_set_ca_state(sk, TCP_CA_CWR);
-	}
-}
-
+extern void tcp_enter_cwr(struct sock *sk);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
 
 /* Slow start with delack produces 3 packets of burst, so that
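
Likewise, the two CWR inlines collapse into a single external definition. A sketch, assuming the bodies move verbatim (the destination file is not shown here) with __tcp_enter_cwr demoted to a local static helper:

	/* Lower cwnd and set ssthresh, without dropping back to slow start. */
	static void __tcp_enter_cwr(struct sock *sk)
	{
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct tcp_sock *tp = tcp_sk(sk);

		tp->undo_marker = 0;
		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tp->snd_cwnd = min(tp->snd_cwnd,
				   tcp_packets_in_flight(tp) + 1U);
		tp->snd_cwnd_cnt = 0;
		tp->high_seq = tp->snd_nxt;
		tp->snd_cwnd_stamp = tcp_time_stamp;
		TCP_ECN_queue_cwr(tp);
	}

	void tcp_enter_cwr(struct sock *sk)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		tp->prior_ssthresh = 0;
		tp->bytes_acked = 0;
		if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
			__tcp_enter_cwr(sk);
			tcp_set_ca_state(sk, TCP_CA_CWR);
		}
	}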
@@ -815,14 +744,14 @@ static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	return left <= tcp_max_burst(tp);
 }
 
-static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
-					   const struct sk_buff *skb)
+static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
+				       const struct sk_buff *skb)
 {
 	if (skb->len < mss)
 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 }
 
-static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	if (!tp->packets_out && !icsk->icsk_pending)
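
tcp_minshall_update() records the end sequence of the last sub-MSS segment sent; the Nagle-style checks elsewhere then refuse to emit another small segment while that one is still unacknowledged. The companion predicate on the output path looks roughly like the following (a sketch from this kernel generation, not guaranteed verbatim):

	static inline int tcp_minshall_check(const struct tcp_sock *tp)
	{
		/* A small segment is outstanding if snd_sml is past
		 * snd_una but not past snd_nxt.
		 */
		return after(tp->snd_sml, tp->snd_una) &&
		       !after(tp->snd_sml, tp->snd_nxt);
	}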
@@ -830,18 +759,18 @@ static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *t
 					  icsk->icsk_rto, TCP_RTO_MAX);
 }
 
-static __inline__ void tcp_push_pending_frames(struct sock *sk,
-					       struct tcp_sock *tp)
+static inline void tcp_push_pending_frames(struct sock *sk,
+					   struct tcp_sock *tp)
 {
 	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
 }
 
-static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 {
 	tp->snd_wl1 = seq;
 }
 
-static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 {
 	tp->snd_wl1 = seq;
 }
@@ -849,19 +778,19 @@ static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 /*
  * Calculate(/check) TCP checksum
  */
-static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
-				   unsigned long saddr, unsigned long daddr,
-				   unsigned long base)
+static inline u16 tcp_v4_check(struct tcphdr *th, int len,
+			       unsigned long saddr, unsigned long daddr,
+			       unsigned long base)
 {
 	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
 }
 
-static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
+static inline int __tcp_checksum_complete(struct sk_buff *skb)
 {
 	return __skb_checksum_complete(skb);
 }
 
-static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
+static inline int tcp_checksum_complete(struct sk_buff *skb)
 {
 	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
 		__tcp_checksum_complete(skb);
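
tcp_v4_check() only folds in the IPv4 pseudo-header; the caller passes the sum over the TCP header and payload as `base`. A typical transmit-side pattern (a sketch; `inet`, `th` and `len` are assumed locals, and the exact call sites are outside this diff):

	th->check = 0;
	th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
				 csum_partial((char *)th, len, 0));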
@@ -869,7 +798,7 @@ static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
 
 /* Prequeue for VJ style copy to user, combined with checksumming. */
 
-static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
+static inline void tcp_prequeue_init(struct tcp_sock *tp)
 {
 	tp->ucopy.task = NULL;
 	tp->ucopy.len = 0;
@@ -885,7 +814,7 @@ static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
  *
  * NOTE: is this not too big to inline?
  */
-static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
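
The body of tcp_prequeue() sits outside this hunk's context. Its job, per the comment above: when a reader is blocked in recvmsg(), defer incoming segments onto tp->ucopy.prequeue so that checksumming and protocol processing happen in the reader's context rather than at softirq time. In rough outline (a sketch of the idea, not the verbatim body):

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		/* ... flush the queue if it has grown too large,
		 * otherwise wake the sleeping reader ... */
		return 1;	/* consumed; processed later in process context */
	}
	return 0;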
@@ -926,7 +855,7 @@ static const char *statename[]={
 };
 #endif
 
-static __inline__ void tcp_set_state(struct sock *sk, int state)
+static inline void tcp_set_state(struct sock *sk, int state)
 {
 	int oldstate = sk->sk_state;
 
@@ -960,7 +889,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
 #endif
 }
 
-static __inline__ void tcp_done(struct sock *sk)
+static inline void tcp_done(struct sock *sk)
 {
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
@@ -973,81 +902,13 @@ static __inline__ void tcp_done(struct sock *sk)
 	inet_csk_destroy_sock(sk);
 }
 
-static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
+static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
 	rx_opt->dsack = 0;
 	rx_opt->eff_sacks = 0;
 	rx_opt->num_sacks = 0;
 }
 
-static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
-{
-	if (tp->rx_opt.tstamp_ok) {
-		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
-					  (TCPOPT_NOP << 16) |
-					  (TCPOPT_TIMESTAMP << 8) |
-					  TCPOLEN_TIMESTAMP);
-		*ptr++ = htonl(tstamp);
-		*ptr++ = htonl(tp->rx_opt.ts_recent);
-	}
-	if (tp->rx_opt.eff_sacks) {
-		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
-		int this_sack;
-
-		*ptr++ = htonl((TCPOPT_NOP << 24) |
-			       (TCPOPT_NOP << 16) |
-			       (TCPOPT_SACK << 8) |
-			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
-						     TCPOLEN_SACK_PERBLOCK)));
-		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
-			*ptr++ = htonl(sp[this_sack].start_seq);
-			*ptr++ = htonl(sp[this_sack].end_seq);
-		}
-		if (tp->rx_opt.dsack) {
-			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks--;
-		}
-	}
-}
-
-/* Construct a tcp options header for a SYN or SYN_ACK packet.
- * If this is ever changed make sure to change the definition of
- * MAX_SYN_SIZE to match the new maximum number of options that you
- * can generate.
- */
-static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
-					 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
-{
-	/* We always get an MSS option.
-	 * The option bytes which will be seen in normal data
-	 * packets (should timestamps be used) must be included
-	 * in the MSS advertised. But we subtract them from
-	 * tp->mss_cache so that calculations in tcp_sendmsg are
-	 * simpler etc. So account for this fact here if
-	 * necessary. If we don't do this correctly, as a
-	 * receiver we won't recognize data packets as being
-	 * full sized when we should, and thus we won't abide
-	 * by the delayed ACK rules correctly.
-	 * SACKs don't matter, we never delay an ACK when we
-	 * have any of those going out.
-	 */
-	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
-	if (ts) {
-		if (sack)
-			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
-						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
-		else
-			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
-		*ptr++ = htonl(tstamp);		/* TSVAL */
-		*ptr++ = htonl(ts_recent);	/* TSECR */
-	} else if (sack)
-		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
-	if (offer_wscale)
-		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
-}
-
 /* Determine a window scaling and initial window to offer. */
 extern void tcp_select_initial_window(int __space, __u32 mss,
 				      __u32 *rcv_wnd, __u32 *window_clamp,
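
For a concrete picture of what the removed tcp_syn_build_options() emits (the function presumably moves to the TCP output path, e.g. net/ipv4/tcp_output.c, though this diff does not show it): a SYN offering mss = 1460 with timestamps, SACK and wscale = 7 produces five 32-bit option words. A sketch with the constants written out (`tstamp` and `ts_recent` are placeholders):

	__u32 opts[5];

	/* kind 2, len 4, MSS 1460 -> 0x020405b4 */
	opts[0] = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | 1460);
	/* SACK-permitted folded into the timestamp option's leading word */
	opts[1] = htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
			(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
	opts[2] = htonl(tstamp);	/* TSVAL */
	opts[3] = htonl(ts_recent);	/* TSECR */
	/* NOP pad, then window scale: kind 3, len 3, shift 7 */
	opts[4] = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) |
			(TCPOLEN_WINDOW << 8) | 7);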
@@ -1072,9 +933,9 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static __inline__ void tcp_openreq_init(struct request_sock *req,
-					struct tcp_options_received *rx_opt,
-					struct sk_buff *skb)
+static inline void tcp_openreq_init(struct request_sock *req,
+				    struct tcp_options_received *rx_opt,
+				    struct sk_buff *skb)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 
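
tcp_full_space() above turns the whole receive buffer into advertisable window through tcp_win_from_space(), which discounts the share reserved for skb overhead via sysctl_tcp_adv_win_scale. A worked example, assuming the usual definition of that helper (it lives earlier in this header, outside the displayed hunks):

	int space = 87380;	/* a typical default sk_rcvbuf */
	int scale = 2;		/* assumed sysctl_tcp_adv_win_scale */
	int win = space - (space >> scale);	/* 87380 - 21845 = 65535 */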