author      David S. Miller <davem@davemloft.net>    2005-07-05 18:18:03 -0400
committer   David S. Miller <davem@davemloft.net>    2005-07-05 18:18:03 -0400
commit      f6302d1d78f77c2d4c8bd32b0afc2df7fdf5f281 (patch)
tree        204ae65914f845312059af3fbd633460bda820e9 /include/net/tcp.h
parent      fc6415bcb0f58f03adb910e56d7e1df6368794e0 (diff)
[TCP]: Move send test logic out of net/tcp.h
This just moves the code into tcp_output.c; no logic changes are made
by this patch.
Using this as a baseline, we can begin to untangle the mess of
comparisons for the Nagle test et al. We will also be able to reduce
all of the redundant computation that occurs when outputting data
packets.
Signed-off-by: David S. Miller <davem@davemloft.net>
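For orientation, the decision being relocated is the Nagle/Minshall hold-back
test visible in the removed hunk below: a sub-MSS segment without FIN is held
if TCP_CORK is set, or if plain Nagle applies while (small) data is still
unacknowledged. A minimal standalone sketch of that check follows; the struct,
field, and macro names are simplified stand-ins for illustration, not the
kernel's own types.

/* Sketch of the Nagle/Minshall hold-back test moved into tcp_output.c.
 * All identifiers here are illustrative stand-ins, not kernel types. */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_NAGLE_CORK 2	/* stands in for TCP_NAGLE_CORK */

struct sketch_seg {
	uint32_t len;		/* payload bytes in this segment */
	bool fin;		/* segment carries FIN */
};

struct sketch_conn {
	uint32_t packets_out;	/* segments currently in flight */
	bool small_unacked;	/* Minshall: a sub-MSS segment is still unacked */
};

/* Returns true when Nagle says the segment must be held back: it is smaller
 * than one MSS, carries no FIN, and either TCP_CORK is set or plain Nagle
 * applies while data is still outstanding. */
bool sketch_nagle_defers(const struct sketch_conn *c,
			 const struct sketch_seg *s,
			 uint32_t mss_now, int nonagle)
{
	return s->len < mss_now &&
	       !s->fin &&
	       ((nonagle & SKETCH_NAGLE_CORK) ||
		(!nonagle && c->packets_out && c->small_unacked));
}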
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--   include/net/tcp.h | 113
1 file changed, 3 insertions(+), 110 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index afe41c5de2f2..f2b104532de1 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -849,6 +849,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 /* tcp_output.c */

 extern int tcp_write_xmit(struct sock *, int nonagle);
+extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
+				       unsigned cur_mss, int nonagle);
+extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
@@ -1284,12 +1287,6 @@ static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
 	return 3;
 }

-static __inline__ int tcp_minshall_check(const struct tcp_sock *tp)
-{
-	return after(tp->snd_sml,tp->snd_una) &&
-		!after(tp->snd_sml, tp->snd_nxt);
-}
-
 static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
 					   const struct sk_buff *skb)
 {
@@ -1297,122 +1294,18 @@ static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 }

-/* Return 0, if packet can be sent now without violation Nagle's rules:
-   1. It is full sized.
-   2. Or it contains FIN.
-   3. Or TCP_NODELAY was set.
-   4. Or TCP_CORK is not set, and all sent packets are ACKed.
-      With Minshall's modification: all sent small packets are ACKed.
- */
-
-static __inline__ int
-tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
-		unsigned mss_now, int nonagle)
-{
-	return (skb->len < mss_now &&
-		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
-		((nonagle&TCP_NAGLE_CORK) ||
-		 (!nonagle &&
-		  tp->packets_out &&
-		  tcp_minshall_check(tp))));
-}
-
-extern void tcp_set_skb_tso_segs(struct sock *, struct sk_buff *);
-
-/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
- * should be put on the wire right now.
- */
-static __inline__ int tcp_snd_test(struct sock *sk,
-				   struct sk_buff *skb,
-				   unsigned cur_mss, int nonagle)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	int pkts = tcp_skb_pcount(skb);
-
-	if (!pkts) {
-		tcp_set_skb_tso_segs(sk, skb);
-		pkts = tcp_skb_pcount(skb);
-	}
-
-	/* RFC 1122 - section 4.2.3.4
-	 *
-	 * We must queue if
-	 *
-	 * a) The right edge of this frame exceeds the window
-	 * b) There are packets in flight and we have a small segment
-	 *    [SWS avoidance and Nagle algorithm]
-	 *    (part of SWS is done on packetization)
-	 *    Minshall version sounds: there are no _small_
-	 *    segments in flight. (tcp_nagle_check)
-	 * c) We have too many packets 'in flight'
-	 *
-	 * Don't use the nagle rule for urgent data (or
-	 * for the final FIN -DaveM).
-	 *
-	 * Also, Nagle rule does not apply to frames, which
-	 * sit in the middle of queue (they have no chances
-	 * to get new data) and if room at tail of skb is
-	 * not enough to save something seriously (<32 for now).
-	 */
-
-	/* Don't be strict about the congestion window for the
-	 * final FIN frame.  -DaveM
-	 */
-	return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
-		 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
-		(((tcp_packets_in_flight(tp) + (pkts-1)) < tp->snd_cwnd) ||
-		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
-		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
-}
-
 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
 	if (!tp->packets_out && !tp->pending)
 		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
 }

-static __inline__ int tcp_skb_is_last(const struct sock *sk,
-				      const struct sk_buff *skb)
-{
-	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
-}
-
-/* Push out any pending frames which were held back due to
- * TCP_CORK or attempt at coalescing tiny packets.
- * The socket must be locked by the caller.
- */
-static __inline__ void __tcp_push_pending_frames(struct sock *sk,
-						 struct tcp_sock *tp,
-						 unsigned cur_mss,
-						 int nonagle)
-{
-	struct sk_buff *skb = sk->sk_send_head;
-
-	if (skb) {
-		if (!tcp_skb_is_last(sk, skb))
-			nonagle = TCP_NAGLE_PUSH;
-		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
-		    tcp_write_xmit(sk, nonagle))
-			tcp_check_probe_timer(sk, tp);
-	}
-	tcp_cwnd_validate(sk, tp);
-}
-
 static __inline__ void tcp_push_pending_frames(struct sock *sk,
 					       struct tcp_sock *tp)
 {
 	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
 }

-static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
-{
-	struct sk_buff *skb = sk->sk_send_head;
-
-	return (skb &&
-		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
-			     tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
-}
-
 static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
 {
 	tp->snd_wl1 = seq;