author		Dmitry Torokhov <dmitry.torokhov@gmail.com>	2009-05-08 21:29:27 -0400
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2009-05-08 21:29:27 -0400
commit		d585a021c0b10b0477d6b608c53e1feb8cde0507 (patch)
tree		5ca059da1db7f15d4b29427644ad9c08270c885c /include/net/tcp.h
parent		84e5b0d00f8f84c4ae226be131d4bebbcee88bd3 (diff)
parent		091bf7624d1c90cec9e578a18529f615213ff847 (diff)

Merge commit 'v2.6.30-rc5' into next

Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	66
1 file changed, 41 insertions(+), 25 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 218235de8963..646dbe3962ea 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -41,6 +41,7 @@
 #include <net/ip.h>
 #include <net/tcp_states.h>
 #include <net/inet_ecn.h>
+#include <net/dst.h>
 
 #include <linux/seq_file.h>
 
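Note: the new <net/dst.h> include appears to be what lets the tcp_rto_min() helper added further down call the dst_entry accessors (__sk_dst_get(), dst_metric_locked(), dst_metric_rtt()) directly from this header.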
@@ -481,7 +482,16 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)
 }
 
 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
-extern unsigned int tcp_current_mss(struct sock *sk, int large);
+extern unsigned int tcp_current_mss(struct sock *sk);
+
+/* Bound MSS / TSO packet size with the half of the window */
+static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
+{
+	if (tp->max_window && pktsize > (tp->max_window >> 1))
+		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
+	else
+		return pktsize;
+}
 
 /* tcp.c */
 extern void tcp_get_info(struct sock *, struct tcp_info *);
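Note: tcp_bound_to_half_wnd() caps an MSS or TSO packet size at half of the largest receive window the peer has ever advertised (tp->max_window), while keeping at least 68 (the minimum IPv4 MTU) minus tcp_header_len bytes so the clamp can never drive the segment size toward zero. A quick worked example with hypothetical values, assuming a 20-byte TCP header:

	/* tp->max_window == 1000, tp->tcp_header_len == 20 */
	tcp_bound_to_half_wnd(tp, 1400);  /* 1400 > 500, returns max(500, 48) == 500 */
	tcp_bound_to_half_wnd(tp, 400);   /* 400 <= 500, returns 400 unchanged */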
@@ -521,6 +531,17 @@ static inline void tcp_fast_path_check(struct sock *sk)
 		tcp_fast_path_on(tp);
 }
 
+/* Compute the actual rto_min value */
+static inline u32 tcp_rto_min(struct sock *sk)
+{
+	struct dst_entry *dst = __sk_dst_get(sk);
+	u32 rto_min = TCP_RTO_MIN;
+
+	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
+		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
+	return rto_min;
+}
+
 /* Compute the actual receive window we are currently advertising.
  * Rcv_nxt can be after the window if our peer push more data
  * than the offered window.
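Note: tcp_rto_min() makes the minimum retransmission timeout tunable per destination: if the route carries a locked RTAX_RTO_MIN metric, that value (converted to jiffies by dst_metric_rtt()) wins; otherwise the compile-time TCP_RTO_MIN applies. A minimal sketch of the fallback, assuming HZ=1000 and no locked metric on the route (illustrative values):

	u32 rto_min = tcp_rto_min(sk);	/* TCP_RTO_MIN == HZ/5, so 200 jiffies = 200 ms */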
@@ -607,21 +628,6 @@ static inline int tcp_skb_mss(const struct sk_buff *skb)
 	return skb_shinfo(skb)->gso_size;
 }
 
-static inline void tcp_dec_pcount_approx_int(__u32 *count, const int decr)
-{
-	if (*count) {
-		*count -= decr;
-		if ((int)*count < 0)
-			*count = 0;
-	}
-}
-
-static inline void tcp_dec_pcount_approx(__u32 *count,
-					 const struct sk_buff *skb)
-{
-	tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb));
-}
-
 /* Events passed to congestion control interface */
 enum tcp_ca_event {
 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
@@ -685,6 +691,7 @@ extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
 extern int tcp_set_allowed_congestion_control(char *allowed);
 extern int tcp_set_congestion_control(struct sock *sk, const char *name);
 extern void tcp_slow_start(struct tcp_sock *tp);
+extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
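Note: tcp_cong_avoid_ai() factors out the classic additive-increase step so congestion-control modules stop open-coding it. The header only declares it; a sketch of the expected behaviour (the definition itself lives in net/ipv4/tcp_cong.c):

	/* Grow cwnd by one segment for every w ACKed segments,
	 * using tp->snd_cwnd_cnt as the fractional accumulator (sketch).
	 */
	void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
	{
		if (tp->snd_cwnd_cnt >= w) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		} else {
			tp->snd_cwnd_cnt++;
		}
	}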
@@ -821,15 +828,15 @@ static inline void tcp_push_pending_frames(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	__tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
+	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
 }
 
-static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
 {
 	tp->snd_wl1 = seq;
 }
 
-static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
 {
 	tp->snd_wl1 = seq;
 }
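Note: as the bodies above show, tcp_init_wl() and tcp_update_wl() only ever record seq in tp->snd_wl1; the ack argument was dead, so it is dropped and call sites shrink mechanically, e.g. (illustrative call site):

	-	tcp_update_wl(tp, ack, ack_seq);
	+	tcp_update_wl(tp, ack_seq);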
@@ -900,7 +907,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 			wake_up_interruptible(sk->sk_sleep);
 			if (!inet_csk_ack_scheduled(sk))
 				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-							  (3 * TCP_RTO_MIN) / 4,
+						  (3 * tcp_rto_min(sk)) / 4,
 							  TCP_RTO_MAX);
 		}
 		return 1;
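Note: with the default TCP_RTO_MIN of HZ/5 (200 ms), the delayed-ACK timeout here is unchanged: 3 * 200 ms / 4 = 150 ms. Switching the constant to tcp_rto_min(sk) simply lets a locked per-route RTAX_RTO_MIN metric scale the prequeue delayed-ACK timer along with the RTO itself.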
@@ -925,7 +932,6 @@ extern void tcp_done(struct sock *sk);
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
 	rx_opt->dsack = 0;
-	rx_opt->eff_sacks = 0;
 	rx_opt->num_sacks = 0;
 }
 
@@ -997,11 +1003,21 @@ static inline int tcp_fin_time(const struct sock *sk)
 	return fin_timeout;
 }
 
-static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
+static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
+				 int paws_win)
 {
-	if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
-		return 0;
-	if (get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
+	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
+		return 1;
+	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
+		return 1;
+
+	return 0;
+}
+
+static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
+				  int rst)
+{
+	if (tcp_paws_check(rx_opt, 0))
 		return 0;
 
 	/* RST segments are not recommended to carry timestamp,