Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	137
1 file changed, 68 insertions(+), 69 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d695cea7730d..7de4ea3a04d9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -309,6 +309,9 @@ extern int tcp_twsk_unique(struct sock *sk,
 
 extern void tcp_twsk_destructor(struct sock *sk);
 
+extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+			       struct pipe_inode_info *pipe, size_t len, unsigned int flags);
+
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
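The tcp_splice_read() declaration above is the TCP backend that services splice(2) on a connected socket. As a rough userspace illustration of the path it enables (the fd names and the 64 KiB length are arbitrary, error handling trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Move up to 64 KiB from a connected TCP socket into a pipe without
 * copying the payload through userspace; splice(2) on a TCP socket
 * reaches tcp_splice_read() in the kernel. Returns bytes moved or -1.
 */
static ssize_t sock_to_pipe(int sockfd, int pipe_wr)
{
	return splice(sockfd, NULL, pipe_wr, NULL, 65536, SPLICE_F_MOVE);
}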
@@ -575,10 +578,6 @@ struct tcp_skb_cb {
 #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 
-#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/
-
-#define TCPCB_AT_TAIL		(TCPCB_URG)
-
 	__u16	urg_ptr;	/* Valid w/URG flags is set.	*/
 	__u32	ack_seq;	/* Sequence number ACK'd	*/
 };
@@ -649,7 +648,7 @@ struct tcp_congestion_ops {
 	/* lower bound for congestion window (optional) */
 	u32 (*min_cwnd)(const struct sock *sk);
 	/* do new cwnd calculation (required) */
-	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight, int good_ack);
+	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
 	/* call before changing ca_state (optional) */
 	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
@@ -680,7 +679,7 @@ extern void tcp_slow_start(struct tcp_sock *tp);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag);
+extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
 extern u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
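Both hunks drop the trailing flag argument from the cong_avoid hook, so out-of-tree congestion control modules need the same one-line update. A minimal sketch against the new signature (the module name and the Reno-like growth logic are illustrative only, not part of this patch):

#include <linux/module.h>
#include <net/tcp.h>

/* Illustrative cong_avoid with the new three-argument signature */
static void example_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* unchanged semantics, but now an out-of-line function */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);		/* exponential growth phase */
	else if (tp->snd_cwnd < tp->snd_cwnd_clamp)
		tp->snd_cwnd++;			/* crude linear growth */
}

static struct tcp_congestion_ops example_ops = {
	.ssthresh	= tcp_reno_ssthresh,
	.min_cwnd	= tcp_reno_min_cwnd,
	.cong_avoid	= example_cong_avoid,
	.name		= "example",
	.owner		= THIS_MODULE,
};

Such a module would be registered with tcp_register_congestion_control(&example_ops) from its init function, exactly as before; only the hook signature changes.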
@@ -782,26 +781,12 @@ static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
 	return 3;
 }
 
-/* RFC2861 Check whether we are limited by application or congestion window
- * This is the inverse of cwnd check in tcp_tso_should_defer
- */
-static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+/* Returns end sequence number of the receiver's advertised window */
+static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
-	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 left;
-
-	if (in_flight >= tp->snd_cwnd)
-		return 1;
-
-	if (!sk_can_gso(sk))
-		return 0;
-
-	left = tp->snd_cwnd - in_flight;
-	if (sysctl_tcp_tso_win_divisor)
-		return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd;
-	else
-		return left <= tcp_max_burst(tp);
+	return tp->snd_una + tp->snd_wnd;
 }
+extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 
 static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 				       const struct sk_buff *skb)
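tcp_wnd_end() centralizes the tp->snd_una + tp->snd_wnd sum that send-path checks previously open-coded, while tcp_is_cwnd_limited() keeps its old semantics and merely moves out of line. A hedged sketch of the window test the new helper is intended for (the wrapper name is made up):

/* Illustrative: an skb fits the receiver's advertised window iff its
 * end sequence does not pass snd_una + snd_wnd.
 */
static inline int example_fits_rcv_window(const struct tcp_sock *tp,
					  const struct sk_buff *skb)
{
	return !after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp));
}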
@@ -921,40 +906,7 @@ static const char *statename[]={
 	"Close Wait","Last ACK","Listen","Closing"
 };
 #endif
-
-static inline void tcp_set_state(struct sock *sk, int state)
-{
-	int oldstate = sk->sk_state;
-
-	switch (state) {
-	case TCP_ESTABLISHED:
-		if (oldstate != TCP_ESTABLISHED)
-			TCP_INC_STATS(TCP_MIB_CURRESTAB);
-		break;
-
-	case TCP_CLOSE:
-		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
-			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
-
-		sk->sk_prot->unhash(sk);
-		if (inet_csk(sk)->icsk_bind_hash &&
-		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
-			inet_put_port(&tcp_hashinfo, sk);
-		/* fall through */
-	default:
-		if (oldstate==TCP_ESTABLISHED)
-			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
-	}
-
-	/* Change state AFTER socket is unhashed to avoid closed
-	 * socket sitting in hash tables.
-	 */
-	sk->sk_state = state;
-
-#ifdef STATE_TRACE
-	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
-#endif
-}
+extern void tcp_set_state(struct sock *sk, int state);
 
 extern void tcp_done(struct sock *sk);
 
@@ -1078,7 +1030,6 @@ static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
 {
 	tcp_clear_retrans_hints_partial(tp);
-	tp->fastpath_skb_hint = NULL;
 }
 
 /* MD5 Signature */
@@ -1153,7 +1104,8 @@ extern int tcp_v4_calc_md5_hash(char *md5_hash,
 					struct dst_entry *dst,
 					struct request_sock *req,
 					struct tcphdr *th,
-					int protocol, int tcplen);
+					int protocol,
+					unsigned int tcplen);
 extern struct tcp_md5sig_key	*tcp_v4_md5_lookup(struct sock *sk,
 						   struct sock *addr_sk);
 
@@ -1193,8 +1145,8 @@ static inline void tcp_write_queue_purge(struct sock *sk)
 	struct sk_buff *skb;
 
 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
-		sk_stream_free_skb(sk, skb);
-	sk_stream_mem_reclaim(sk);
+		sk_wmem_free_skb(sk, skb);
+	sk_mem_reclaim(sk);
 }
 
 static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
@@ -1227,6 +1179,11 @@ static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_bu
 	for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\
 	     skb = skb->next)
 
+#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
+	for (tmp = skb->next;						\
+	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
+	     skb = tmp, tmp = skb->next)
+
 static inline struct sk_buff *tcp_send_head(struct sock *sk)
 {
 	return sk->sk_send_head;
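The new _safe variant caches the next pointer before the loop body runs, so the current skb may be unlinked and freed mid-walk; the plain tcp_for_write_queue_from() would read skb->next after the skb was already freed. An illustrative (non-kernel-source) use, assuming a non-empty queue in which fully-acked skbs precede the send head:

/* Illustrative: drop everything the peer has already acked */
static void example_free_acked(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_write_queue_head(sk);
	struct sk_buff *tmp;

	tcp_for_write_queue_from_safe(skb, tmp, sk) {
		if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			break;			/* not fully acked yet */
		tcp_unlink_write_queue(skb, sk);
		sk_wmem_free_skb(sk, skb);	/* name as renamed by this patch */
	}
}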
@@ -1234,14 +1191,9 @@ static inline struct sk_buff *tcp_send_head(struct sock *sk)
 
 static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
 	sk->sk_send_head = skb->next;
 	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
 		sk->sk_send_head = NULL;
-	/* Don't override Nagle indefinately with F-RTO */
-	if (tp->frto_counter == 2)
-		tp->frto_counter = 3;
 }
 
 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
@@ -1265,8 +1217,12 @@ static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb
 	__tcp_add_write_queue_tail(sk, skb);
 
 	/* Queue it, remembering where we must start sending. */
-	if (sk->sk_send_head == NULL)
+	if (sk->sk_send_head == NULL) {
 		sk->sk_send_head = skb;
+
+		if (tcp_sk(sk)->highest_sack == NULL)
+			tcp_sk(sk)->highest_sack = skb;
+	}
 }
 
 static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
@@ -1288,6 +1244,9 @@ static inline void tcp_insert_write_queue_before(struct sk_buff *new,
 						  struct sock *sk)
 {
 	__skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
+
+	if (sk->sk_send_head == skb)
+		sk->sk_send_head = new;
 }
 
 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
@@ -1306,6 +1265,45 @@ static inline int tcp_write_queue_empty(struct sock *sk)
 	return skb_queue_empty(&sk->sk_write_queue);
 }
 
+/* Start sequence of the highest skb with SACKed bit, valid only if
+ * sacked > 0 or when the caller has ensured validity by itself.
+ */
+static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
+{
+	if (!tp->sacked_out)
+		return tp->snd_una;
+
+	if (tp->highest_sack == NULL)
+		return tp->snd_nxt;
+
+	return TCP_SKB_CB(tp->highest_sack)->seq;
+}
+
+static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
+{
+	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
+						tcp_write_queue_next(sk, skb);
+}
+
+static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
+{
+	return tcp_sk(sk)->highest_sack;
+}
+
+static inline void tcp_highest_sack_reset(struct sock *sk)
+{
+	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
+}
+
+/* Called when old skb is about to be deleted (to be combined with new skb) */
+static inline void tcp_highest_sack_combine(struct sock *sk,
+					    struct sk_buff *old,
+					    struct sk_buff *new)
+{
+	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
+		tcp_sk(sk)->highest_sack = new;
+}
+
 /* /proc */
 enum tcp_seq_states {
 	TCP_SEQ_STATE_LISTENING,
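These helpers turn tp->highest_sack into a maintained cache: it is seeded in tcp_add_write_queue_tail() above, retargeted by tcp_highest_sack_combine() when skbs merge, and advanced or reset as the SACK scoreboard changes, instead of being recomputed by walking the queue. A hedged sketch of the kind of query they serve, loosely modeled on the reordering checks in the SACK code (the function name is made up):

/* Illustrative: a sequence below the highest SACKed sequence that is
 * only now being acked suggests reordering rather than loss.
 */
static inline int example_seq_reordered(struct tcp_sock *tp, u32 seq)
{
	return tp->sacked_out && before(seq, tcp_highest_sack_seq(tp));
}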
@@ -1356,7 +1354,8 @@ struct tcp_sock_af_ops {
 					      struct dst_entry *dst,
 					      struct request_sock *req,
 					      struct tcphdr *th,
-					      int protocol, int len);
+					      int protocol,
+					      unsigned int len);
 	int		(*md5_add) (struct sock *sk,
 				    struct sock *addr_sk,
 				    u8 *newkey,