Diffstat (limited to 'include/net/tcp.h')

 include/net/tcp.h | 161 ++++----------------------------
 1 file changed, 20 insertions(+), 141 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ec9e20c27179..f4f9aba07ac2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -721,11 +721,16 @@ static inline int tcp_ack_scheduled(struct tcp_sock *tp)
 	return tp->ack.pending&TCP_ACK_SCHED;
 }
 
-static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp)
+static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts)
 {
-	if (tp->ack.quick && --tp->ack.quick == 0) {
-		/* Leaving quickack mode we deflate ATO. */
-		tp->ack.ato = TCP_ATO_MIN;
+	if (tp->ack.quick) {
+		if (pkts >= tp->ack.quick) {
+			tp->ack.quick = 0;
+
+			/* Leaving quickack mode we deflate ATO. */
+			tp->ack.ato = TCP_ATO_MIN;
+		} else
+			tp->ack.quick -= pkts;
 	}
 }
 
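With TSO, one skb can stand for several on-the-wire segments, so the old --tp->ack.quick (one step per call) would drain the quick-ACK credit too slowly. The new signature takes a segment count and clamps the credit at zero. A minimal caller sketch, assuming the transmit path passes tcp_skb_pcount() for the skb it just sent; the call site is not part of this header change and the helper name below is hypothetical:

	/* Hypothetical call site: consume quick-ACK credit per segment,
	 * not per skb.  tcp_skb_pcount() reports how many real packets
	 * a (possibly TSO) skb represents. */
	static void tcp_event_ack_sent_sketch(struct sock *sk, struct sk_buff *skb)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		tcp_dec_quickack_mode(tp, tcp_skb_pcount(skb));
	}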
@@ -843,7 +848,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 
 /* tcp_output.c */
 
-extern int tcp_write_xmit(struct sock *, int nonagle);
+extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
+				      unsigned int cur_mss, int nonagle);
+extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
@@ -853,12 +860,16 @@ extern void tcp_send_probe0(struct sock *);
 extern void tcp_send_partial(struct sock *);
 extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, int priority);
+extern void tcp_send_active_reset(struct sock *sk,
+				  unsigned int __nocast priority);
 extern int tcp_send_synack(struct sock *);
-extern void tcp_push_one(struct sock *, unsigned mss_now);
+extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
 
+/* tcp_input.c */
+extern void tcp_cwnd_application_limited(struct sock *sk);
+
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
 extern void tcp_clear_xmit_timers(struct sock *);
@@ -958,7 +969,7 @@ static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long
 static inline void tcp_initialize_rcv_mss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	unsigned int hint = min(tp->advmss, tp->mss_cache_std);
+	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
 
 	hint = min(hint, tp->rcv_wnd/2);
 	hint = min(hint, TCP_MIN_RCVMSS);
@@ -981,7 +992,7 @@ static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
 
 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
 {
-	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
+	if (skb_queue_empty(&tp->out_of_order_queue) &&
 	    tp->rcv_wnd &&
 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 	    !tp->urg_data)
@@ -1225,28 +1236,6 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
 	tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
-extern void tcp_cwnd_application_limited(struct sock *sk);
-
-/* Congestion window validation. (RFC2861) */
-
-static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
-{
-	__u32 packets_out = tp->packets_out;
-
-	if (packets_out >= tp->snd_cwnd) {
-		/* Network is feed fully. */
-		tp->snd_cwnd_used = 0;
-		tp->snd_cwnd_stamp = tcp_time_stamp;
-	} else {
-		/* Network starves. */
-		if (tp->packets_out > tp->snd_cwnd_used)
-			tp->snd_cwnd_used = tp->packets_out;
-
-		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
-			tcp_cwnd_application_limited(sk);
-	}
-}
-
 /* Set slow start threshould and cwnd not falling to slow start */
 static inline void __tcp_enter_cwr(struct tcp_sock *tp)
 {
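The RFC 2861 congestion-window validation logic is unchanged in substance; presumably it just moves out of line into net/ipv4/tcp_output.c next to the transmit path that calls it (this diff shows only the header side). For reference, a condensed sketch of the rule the removed helper implements, assuming the body is the same at its new home: a sender that keeps the pipe full keeps its cwnd valid, while an application-limited sender records its peak usage and lets the window decay after an RTO of non-validation.

	/* Sketch of the RFC 2861 rule (assumed new home: tcp_output.c). */
	static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
	{
		if (tp->packets_out >= tp->snd_cwnd) {
			/* Pipe kept full: the current cwnd is validated. */
			tp->snd_cwnd_used = 0;
			tp->snd_cwnd_stamp = tcp_time_stamp;
		} else {
			/* Application-limited: track peak usage, and decay
			 * cwnd once it has gone unvalidated for an RTO. */
			if (tp->packets_out > tp->snd_cwnd_used)
				tp->snd_cwnd_used = tp->packets_out;
			if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
				tcp_cwnd_application_limited(sk);
		}
	}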
@@ -1279,12 +1268,6 @@ static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
 	return 3;
 }
 
-static __inline__ int tcp_minshall_check(const struct tcp_sock *tp)
-{
-	return after(tp->snd_sml,tp->snd_una) &&
-		!after(tp->snd_sml, tp->snd_nxt);
-}
-
 static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
 					   const struct sk_buff *skb)
 {
@@ -1292,122 +1275,18 @@ static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 }
 
-/* Return 0, if packet can be sent now without violation Nagle's rules:
-   1. It is full sized.
-   2. Or it contains FIN.
-   3. Or TCP_NODELAY was set.
-   4. Or TCP_CORK is not set, and all sent packets are ACKed.
-      With Minshall's modification: all sent small packets are ACKed.
- */
-
-static __inline__ int
-tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
-		unsigned mss_now, int nonagle)
-{
-	return (skb->len < mss_now &&
-		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
-		((nonagle&TCP_NAGLE_CORK) ||
-		 (!nonagle &&
-		  tp->packets_out &&
-		  tcp_minshall_check(tp))));
-}
-
-extern void tcp_set_skb_tso_segs(struct sock *, struct sk_buff *);
-
-/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
- * should be put on the wire right now.
- */
-static __inline__ int tcp_snd_test(struct sock *sk,
-				   struct sk_buff *skb,
-				   unsigned cur_mss, int nonagle)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	int pkts = tcp_skb_pcount(skb);
-
-	if (!pkts) {
-		tcp_set_skb_tso_segs(sk, skb);
-		pkts = tcp_skb_pcount(skb);
-	}
-
-	/* RFC 1122 - section 4.2.3.4
-	 *
-	 * We must queue if
-	 *
-	 * a) The right edge of this frame exceeds the window
-	 * b) There are packets in flight and we have a small segment
-	 *    [SWS avoidance and Nagle algorithm]
-	 *    (part of SWS is done on packetization)
-	 *    Minshall version sounds: there are no _small_
-	 *    segments in flight. (tcp_nagle_check)
-	 * c) We have too many packets 'in flight'
-	 *
-	 * Don't use the nagle rule for urgent data (or
-	 * for the final FIN -DaveM).
-	 *
-	 * Also, Nagle rule does not apply to frames, which
-	 * sit in the middle of queue (they have no chances
-	 * to get new data) and if room at tail of skb is
-	 * not enough to save something seriously (<32 for now).
-	 */
-
-	/* Don't be strict about the congestion window for the
-	 * final FIN frame.  -DaveM
-	 */
-	return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
-		 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
-		(((tcp_packets_in_flight(tp) + (pkts-1)) < tp->snd_cwnd) ||
-		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
-		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
-}
-
 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
 	if (!tp->packets_out && !tp->pending)
 		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
 }
 
-static __inline__ int tcp_skb_is_last(const struct sock *sk,
-				      const struct sk_buff *skb)
-{
-	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
-}
-
-/* Push out any pending frames which were held back due to
- * TCP_CORK or attempt at coalescing tiny packets.
- * The socket must be locked by the caller.
- */
-static __inline__ void __tcp_push_pending_frames(struct sock *sk,
-						 struct tcp_sock *tp,
-						 unsigned cur_mss,
-						 int nonagle)
-{
-	struct sk_buff *skb = sk->sk_send_head;
-
-	if (skb) {
-		if (!tcp_skb_is_last(sk, skb))
-			nonagle = TCP_NAGLE_PUSH;
-		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
-		    tcp_write_xmit(sk, nonagle))
-			tcp_check_probe_timer(sk, tp);
-	}
-	tcp_cwnd_validate(sk, tp);
-}
-
 static __inline__ void tcp_push_pending_frames(struct sock *sk,
 					       struct tcp_sock *tp)
 {
 	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
 }
 
-static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
-{
-	struct sk_buff *skb = sk->sk_send_head;
-
-	return (skb &&
-		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
-			     tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
-}
-
 static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 {
 	tp->snd_wl1 = seq;
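Net effect of the last three hunks: tcp_minshall_check(), tcp_nagle_check(), tcp_snd_test(), tcp_skb_is_last(), __tcp_push_pending_frames() and tcp_may_send_now() all leave the header, and only the one-line tcp_push_pending_frames() wrapper stays inline; the new externs added earlier point at their out-of-line replacements. A rough sketch of how the two new entry points presumably read in net/ipv4/tcp_output.c, assuming the moved bodies are otherwise unchanged:

	/* Sketch (assumption): the push path, now out of line.  tcp_write_xmit()
	 * and tcp_cwnd_validate() become internal details of tcp_output.c
	 * rather than header-visible helpers. */
	void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
				       unsigned int cur_mss, int nonagle)
	{
		struct sk_buff *skb = sk->sk_send_head;

		if (skb) {
			if (!tcp_skb_is_last(sk, skb))
				nonagle = TCP_NAGLE_PUSH;
			if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
			    tcp_write_xmit(sk, nonagle))
				tcp_check_probe_timer(sk, tp);
		}
		tcp_cwnd_validate(sk, tp);
	}

	int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
	{
		struct sk_buff *skb = sk->sk_send_head;

		return (skb &&
			tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
				     tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
	}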
