author		Dmitry Torokhov <dtor@insightbb.com>	2007-05-01 00:24:54 -0400
committer	Dmitry Torokhov <dtor@insightbb.com>	2007-05-01 00:24:54 -0400
commit		bc95f3669f5e6f63cf0b84fe4922c3c6dd4aa775 (patch)
tree		427fcf2a7287c16d4b5aa6cbf494d59579a6a8b1 /include/net/tcp.h
parent		3d29cdff999c37b3876082278a8134a0642a02cd (diff)
parent		dc87c3985e9b442c60994308a96f887579addc39 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/usb/input/Makefile
drivers/usb/input/gtco.c
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	199
1 file changed, 149 insertions(+), 50 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5c472f255b77..ef8f9d4dae85 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -220,6 +220,7 @@ extern int sysctl_tcp_app_win;
 extern int sysctl_tcp_adv_win_scale;
 extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
+extern int sysctl_tcp_frto_response;
 extern int sysctl_tcp_low_latency;
 extern int sysctl_tcp_dma_copybreak;
 extern int sysctl_tcp_nometrics_save;
@@ -230,6 +231,7 @@ extern int sysctl_tcp_mtu_probing;
 extern int sysctl_tcp_base_mss;
 extern int sysctl_tcp_workaround_signed_windows;
 extern int sysctl_tcp_slow_start_after_idle;
+extern int sysctl_tcp_max_ssthresh;
 
 extern atomic_t tcp_memory_allocated;
 extern atomic_t tcp_sockets_allocated;
@@ -341,6 +343,7 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
 extern int tcp_child_process(struct sock *parent,
			      struct sock *child,
			      struct sk_buff *skb);
+extern int tcp_use_frto(struct sock *sk);
 extern void tcp_enter_frto(struct sock *sk);
 extern void tcp_enter_loss(struct sock *sk, int how);
 extern void tcp_clear_retrans(struct tcp_sock *tp);
@@ -417,9 +420,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 
 /* tcp_output.c */
 
-extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-				      unsigned int cur_mss, int nonagle);
-extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
+extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+				      int nonagle);
+extern int tcp_may_send_now(struct sock *sk);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
@@ -476,8 +479,10 @@ static inline void tcp_fast_path_on(struct tcp_sock *tp)
 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 }
 
-static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_fast_path_check(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (skb_queue_empty(&tp->out_of_order_queue) &&
 	    tp->rcv_wnd &&
 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
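Several helpers in this merge follow the same pattern as this hunk (see also __tcp_push_pending_frames, tcp_packets_out_inc, tcp_check_probe_timer and tcp_push_pending_frames below): the redundant struct tcp_sock * argument is dropped and the helper derives it internally. A minimal caller-side sketch of what changes (the call site itself is hypothetical):

	/* before the merge: callers threaded tp through explicitly */
	tcp_fast_path_check(sk, tcp_sk(sk));

	/* after the merge: the helper fetches tp itself via tcp_sk(sk) */
	tcp_fast_path_check(sk);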
@@ -588,10 +593,10 @@ static inline void tcp_dec_pcount_approx(__u32 *count,
 	}
 }
 
 static inline void tcp_packets_out_inc(struct sock *sk,
-				       struct tcp_sock *tp,
 				       const struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	int orig = tp->packets_out;
 
 	tp->packets_out += tcp_skb_pcount(skb);
@@ -624,9 +629,12 @@ enum tcp_ca_event {
 #define TCP_CA_MAX	128
 #define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
 
+#define TCP_CONG_NON_RESTRICTED 0x1
+#define TCP_CONG_RTT_STAMP	0x2
+
 struct tcp_congestion_ops {
 	struct list_head	list;
-	int	non_restricted;
+	unsigned long flags;
 
 	/* initialize private data (optional) */
 	void (*init)(struct sock *sk);
@@ -640,8 +648,6 @@ struct tcp_congestion_ops {
 	/* do new cwnd calculation (required) */
 	void (*cong_avoid)(struct sock *sk, u32 ack,
 			   u32 rtt, u32 in_flight, int good_ack);
-	/* round trip time sample per acked packet (optional) */
-	void (*rtt_sample)(struct sock *sk, u32 usrtt);
 	/* call before changing ca_state (optional) */
 	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
@@ -649,7 +655,7 @@ struct tcp_congestion_ops {
 	/* new value of cwnd after loss (optional) */
 	u32  (*undo_cwnd)(struct sock *sk);
 	/* hook for packet ack accounting (optional) */
-	void (*pkts_acked)(struct sock *sk, u32 num_acked);
+	void (*pkts_acked)(struct sock *sk, u32 num_acked, ktime_t last);
 	/* get info for inet_diag (optional) */
 	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
 
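Taken together, the three tcp_congestion_ops hunks change the module-facing API: the non_restricted int becomes a TCP_CONG_NON_RESTRICTED bit in the new flags word, and the separate rtt_sample() hook is folded into pkts_acked(), which now also receives a ktime_t. A sketch of how a module might adapt (names prefixed example_ are hypothetical; the flag and hook shapes follow the definitions above):

static void example_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
{
	/* RTT sampling now happens here instead of in a dedicated
	 * rtt_sample() hook; the ktime_t argument carries the timing
	 * information that used to arrive as usrtt. */
}

static struct tcp_congestion_ops example_ops = {
	/* was: .non_restricted = 1; RTT_STAMP asks the stack for timestamps */
	.flags		= TCP_CONG_NON_RESTRICTED | TCP_CONG_RTT_STAMP,
	.pkts_acked	= example_pkts_acked,
	/* .ssthresh and .cong_avoid remain required, as before */
};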
@@ -730,13 +736,11 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 
 static inline void tcp_sync_left_out(struct tcp_sock *tp)
 {
-	if (tp->rx_opt.sack_ok &&
-	    (tp->sacked_out >= tp->packets_out - tp->lost_out))
-		tp->sacked_out = tp->packets_out - tp->lost_out;
+	BUG_ON(tp->sacked_out + tp->lost_out > tp->packets_out);
 	tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
-extern void tcp_enter_cwr(struct sock *sk);
+extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
 
 /* Slow start with delack produces 3 packets of burst, so that
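Two behavioural notes on this hunk: tcp_sync_left_out() now asserts the invariant it used to patch up silently, and tcp_enter_cwr() gains a set_ssthresh flag so a caller can enter CWR state without clobbering ssthresh. A hypothetical call-site sketch:

	/* conventional callers keep the old behaviour by passing 1 */
	tcp_enter_cwr(sk, 1);

	/* an F-RTO response path could enter CWR but leave ssthresh alone */
	tcp_enter_cwr(sk, 0);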
@@ -775,18 +779,21 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 }
 
-static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_check_probe_timer(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+
 	if (!tp->packets_out && !icsk->icsk_pending)
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 					  icsk->icsk_rto, TCP_RTO_MAX);
 }
 
-static inline void tcp_push_pending_frames(struct sock *sk,
-					   struct tcp_sock *tp)
+static inline void tcp_push_pending_frames(struct sock *sk)
 {
-	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	__tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
@@ -815,7 +822,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
 
 static inline int tcp_checksum_complete(struct sk_buff *skb)
 {
-	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+	return !skb_csum_unnecessary(skb) &&
 		__tcp_checksum_complete(skb);
 }
 
@@ -918,21 +925,7 @@ static inline void tcp_set_state(struct sock *sk, int state)
 #endif
 }
 
-static inline void tcp_done(struct sock *sk)
-{
-	if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
-
-	tcp_set_state(sk, TCP_CLOSE);
-	tcp_clear_xmit_timers(sk);
-
-	sk->sk_shutdown = SHUTDOWN_MASK;
-
-	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_state_change(sk);
-	else
-		inet_csk_destroy_sock(sk);
-}
+extern void tcp_done(struct sock *sk);
 
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
@@ -981,7 +974,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
 	ireq->wscale_ok = rx_opt->wscale_ok;
 	ireq->acked = 0;
 	ireq->ecn_ok = 0;
-	ireq->rmt_port = skb->h.th->source;
+	ireq->rmt_port = tcp_hdr(skb)->source;
 }
 
 extern void tcp_enter_memory_pressure(void);
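The skb->h.th → tcp_hdr(skb) change is part of the tree-wide sk_buff header-accessor conversion, replacing the union-based h/nh/mac fields with helpers. A sketch of the accessor this line assumes (matching, to the best of my knowledge, the 2.6.22-era definition in linux/tcp.h):

static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_transport_header(skb);
}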
@@ -1011,7 +1004,7 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int
 {
	if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
		return 0;
-	if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
+	if (get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;
 
	/* RST segments are not recommended to carry timestamp,
@@ -1026,26 +1019,13 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int
 
	   However, we can relax time bounds for RST segments to MSL.
	 */
-	if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
 }
 
 #define TCP_CHECK_TIMER(sk) do { } while (0)
 
-static inline int tcp_use_frto(const struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-
-	/* F-RTO must be activated in sysctl and there must be some
-	 * unsent new data, and the advertised window should allow
-	 * sending it.
-	 */
-	return (sysctl_tcp_frto && sk->sk_send_head &&
-		!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
-		       tp->snd_una + tp->snd_wnd));
-}
-
 static inline void tcp_mib_init(void)
 {
	/* See RFC 2012 */
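The inline tcp_use_frto() removed here is the counterpart of the extern declaration added in the -341,6 hunk above: the eligibility test moves out of line so it can grow beyond a simple window check. A simplified sketch of the retransmit-timeout decision it feeds (the call site shown is hypothetical):

	if (tcp_use_frto(sk))
		tcp_enter_frto(sk);	/* try F-RTO spurious-RTO detection */
	else
		tcp_enter_loss(sk, 0);	/* conventional RTO recovery */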
@@ -1172,6 +1152,125 @@ static inline void tcp_put_md5sig_pool(void)
 	put_cpu();
 }
 
+/* write queue abstraction */
+static inline void tcp_write_queue_purge(struct sock *sk)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
+		sk_stream_free_skb(sk, skb);
+	sk_stream_mem_reclaim(sk);
+}
+
+static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
+{
+	struct sk_buff *skb = sk->sk_write_queue.next;
+	if (skb == (struct sk_buff *) &sk->sk_write_queue)
+		return NULL;
+	return skb;
+}
+
+static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
+{
+	struct sk_buff *skb = sk->sk_write_queue.prev;
+	if (skb == (struct sk_buff *) &sk->sk_write_queue)
+		return NULL;
+	return skb;
+}
+
+static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
+{
+	return skb->next;
+}
+
+#define tcp_for_write_queue(skb, sk)					\
+	for (skb = (sk)->sk_write_queue.next;				\
+	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
+	     skb = skb->next)
+
+#define tcp_for_write_queue_from(skb, sk)				\
+	for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
+	     skb = skb->next)
+
+static inline struct sk_buff *tcp_send_head(struct sock *sk)
+{
+	return sk->sk_send_head;
+}
+
+static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	sk->sk_send_head = skb->next;
+	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
+		sk->sk_send_head = NULL;
+	/* Don't override Nagle indefinitely with F-RTO */
+	if (tp->frto_counter == 2)
+		tp->frto_counter = 3;
+}
+
+static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
+{
+	if (sk->sk_send_head == skb_unlinked)
+		sk->sk_send_head = NULL;
+}
+
+static inline void tcp_init_send_head(struct sock *sk)
+{
+	sk->sk_send_head = NULL;
+}
+
+static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+{
+	__skb_queue_tail(&sk->sk_write_queue, skb);
+}
+
+static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+{
+	__tcp_add_write_queue_tail(sk, skb);
+
+	/* Queue it, remembering where we must start sending. */
+	if (sk->sk_send_head == NULL)
+		sk->sk_send_head = skb;
+}
+
+static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
+{
+	__skb_queue_head(&sk->sk_write_queue, skb);
+}
+
+/* Insert buff after skb on the write queue of sk. */
+static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
+						struct sk_buff *buff,
+						struct sock *sk)
+{
+	__skb_append(skb, buff, &sk->sk_write_queue);
+}
+
+/* Insert skb between prev and next on the write queue of sk. */
+static inline void tcp_insert_write_queue_before(struct sk_buff *new,
+						 struct sk_buff *skb,
+						 struct sock *sk)
+{
+	__skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
+}
+
+static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
+{
+	__skb_unlink(skb, &sk->sk_write_queue);
+}
+
+static inline int tcp_skb_is_last(const struct sock *sk,
+				  const struct sk_buff *skb)
+{
+	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
+}
+
+static inline int tcp_write_queue_empty(struct sock *sk)
+{
+	return skb_queue_empty(&sk->sk_write_queue);
+}
+
 /* /proc */
 enum tcp_seq_states {
 	TCP_SEQ_STATE_LISTENING,
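The roughly 120 added lines in the final hunk give the write queue a proper API, so callers stop poking sk->sk_write_queue and sk->sk_send_head directly (which also lets the F-RTO bookkeeping hide inside tcp_advance_send_head()). A sketch of the intended usage, walking only the already-sent part of the queue (the loop body is hypothetical):

	struct sk_buff *skb;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;	/* everything from here on is still unsent */
		/* ... e.g. per-skb retransmission bookkeeping ... */
	}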