author    David S. Miller <davem@davemloft.net>    2005-07-05 18:19:54 -0400
committer David S. Miller <davem@davemloft.net>    2005-07-05 18:19:54 -0400
commit    7f4dd0a9438c73cbb1c240ece31390cf2c57294e (patch)
tree      09b31dd26a3f51c3bb34647ed6911350c5de32e6 /net/ipv4/tcp_output.c
parent    55c97f3e990c1ff63957c64f6cb10711a09fd70e (diff)
[TCP]: Break out tcp_snd_test() into its constituent parts.
tcp_snd_test() does several different things; use inline functions to express this more clearly:

1) It initializes the TSO count of SKB, if necessary.
2) It performs the Nagle test.
3) It makes sure the congestion window is adhered to.
4) It makes sure SKB fits into the send window.

This cleanup also sets things up so that quantities such as the number of packets available under the congestion window do not need to be calculated multiple times by packet sending loops such as tcp_write_xmit().

Signed-off-by: David S. Miller <davem@davemloft.net>
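The four steps above correspond to the new helpers introduced in the diff below: tcp_init_tso_segs(), tcp_nagle_test(), tcp_cwnd_test() and tcp_snd_wnd_test(). As a rough, stand-alone illustration of the send-window and congestion-window portion of that split (simplified user-space code with invented struct and helper names, not the kernel source):

/* Simplified user-space model of the decomposed send test.  Field and
 * helper names loosely mirror the kernel code but are stand-ins; the
 * sequence-number comparison ignores wrap-around for brevity. */
#include <stdio.h>

struct tp_model {
	unsigned int snd_una;	/* oldest unacknowledged sequence number */
	unsigned int snd_wnd;	/* receiver-advertised send window */
	unsigned int snd_cwnd;	/* congestion window, in packets */
	unsigned int in_flight;	/* packets sent but not yet ACKed */
};

/* Does the segment fit into the send window? */
static int snd_wnd_test(const struct tp_model *tp, unsigned int end_seq)
{
	return end_seq <= tp->snd_una + tp->snd_wnd;
}

/* How many packets does the congestion window still allow? */
static unsigned int cwnd_test(const struct tp_model *tp)
{
	if (tp->in_flight < tp->snd_cwnd)
		return tp->snd_cwnd - tp->in_flight;
	return 0;
}

/* Combined check: return the cwnd quota if the segment may be sent now,
 * 0 otherwise (the Nagle and TSO steps of the real code are omitted). */
static unsigned int snd_test(const struct tp_model *tp, unsigned int end_seq)
{
	unsigned int quota = cwnd_test(tp);

	if (quota && !snd_wnd_test(tp, end_seq))
		quota = 0;
	return quota;
}

int main(void)
{
	struct tp_model tp = { .snd_una = 1000, .snd_wnd = 3000,
			       .snd_cwnd = 10, .in_flight = 4 };

	printf("quota = %u\n", snd_test(&tp, 2500));	/* fits: prints 6 */
	printf("quota = %u\n", snd_test(&tp, 5000));	/* beyond window: 0 */
	return 0;
}

The point of returning a segment count rather than a boolean is visible in snd_test(): the caller gets the congestion-window headroom back and can reuse it instead of recomputing it for every segment.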
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c | 120
1 file changed, 82 insertions(+), 38 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ce1d7cfbecfc..8327e5e86d15 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -434,6 +434,33 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
 	}
 }
 
+/* Does SKB fit into the send window? */
+static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
+{
+	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+
+	return !after(end_seq, tp->snd_una + tp->snd_wnd);
+}
+
+/* Can at least one segment of SKB be sent right now, according to the
+ * congestion window rules?  If so, return how many segments are allowed.
+ */
+static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
+{
+	u32 in_flight, cwnd;
+
+	/* Don't be strict about the congestion window for the final FIN. */
+	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
+		return 1;
+
+	in_flight = tcp_packets_in_flight(tp);
+	cwnd = tp->snd_cwnd;
+	if (in_flight < cwnd)
+		return (cwnd - in_flight);
+
+	return 0;
+}
+
 static inline int tcp_minshall_check(const struct tcp_sock *tp)
 {
 	return after(tp->snd_sml,tp->snd_una) &&
@@ -442,7 +469,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp)
 
 /* Return 0, if packet can be sent now without violation Nagle's rules:
  * 1. It is full sized.
- * 2. Or it contains FIN.
+ * 2. Or it contains FIN. (already checked by caller)
  * 3. Or TCP_NODELAY was set.
  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
  *    With Minshall's modification: all sent small packets are ACKed.
@@ -453,56 +480,73 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
 			    unsigned mss_now, int nonagle)
 {
 	return (skb->len < mss_now &&
-		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
 		((nonagle&TCP_NAGLE_CORK) ||
 		 (!nonagle &&
 		  tp->packets_out &&
 		  tcp_minshall_check(tp))));
 }
 
-/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
- * should be put on the wire right now.
+/* Return non-zero if the Nagle test allows this packet to be
+ * sent now.
  */
-static int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
-			unsigned cur_mss, int nonagle)
+static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
+				 unsigned int cur_mss, int nonagle)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	int pkts = tcp_skb_pcount(skb);
+	/* Nagle rule does not apply to frames, which sit in the middle of the
+	 * write_queue (they have no chances to get new data).
+	 *
+	 * This is implemented in the callers, where they modify the 'nonagle'
+	 * argument based upon the location of SKB in the send queue.
+	 */
+	if (nonagle & TCP_NAGLE_PUSH)
+		return 1;
+
+	/* Don't use the nagle rule for urgent data (or for the final FIN). */
+	if (tp->urg_mode ||
+	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
+		return 1;
+
+	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
+		return 1;
 
-	if (!pkts) {
+	return 0;
+}
+
+/* This must be invoked the first time we consider transmitting
+ * SKB onto the wire.
+ */
+static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb)
+{
+	int tso_segs = tcp_skb_pcount(skb);
+
+	if (!tso_segs) {
 		tcp_set_skb_tso_segs(sk, skb);
-		pkts = tcp_skb_pcount(skb);
+		tso_segs = tcp_skb_pcount(skb);
 	}
+	return tso_segs;
+}
 
-	/* RFC 1122 - section 4.2.3.4
-	 *
-	 * We must queue if
-	 *
-	 * a) The right edge of this frame exceeds the window
-	 * b) There are packets in flight and we have a small segment
-	 *    [SWS avoidance and Nagle algorithm]
-	 *    (part of SWS is done on packetization)
-	 *    Minshall version sounds: there are no _small_
-	 *    segments in flight. (tcp_nagle_check)
-	 * c) We have too many packets 'in flight'
-	 *
-	 * Don't use the nagle rule for urgent data (or
-	 * for the final FIN -DaveM).
-	 *
-	 * Also, Nagle rule does not apply to frames, which
-	 * sit in the middle of queue (they have no chances
-	 * to get new data) and if room at tail of skb is
-	 * not enough to save something seriously (<32 for now).
-	 */
+/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
+ * should be put on the wire right now.  If so, it returns the number of
+ * packets allowed by the congestion window.
+ */
+static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
+				 unsigned int cur_mss, int nonagle)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned int cwnd_quota;
 
-	/* Don't be strict about the congestion window for the
-	 * final FIN frame. -DaveM
-	 */
-	return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
-		|| !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
-		(((tcp_packets_in_flight(tp) + (pkts-1)) < tp->snd_cwnd) ||
-		(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
-		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
+	tcp_init_tso_segs(sk, skb);
+
+	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
+		return 0;
+
+	cwnd_quota = tcp_cwnd_test(tp, skb);
+	if (cwnd_quota &&
+	    !tcp_snd_wnd_test(tp, skb, cur_mss))
+		cwnd_quota = 0;
+
+	return cwnd_quota;
 }
 
 static inline int tcp_skb_is_last(const struct sock *sk,
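As the commit message notes, returning the congestion-window quota lets packet sending loops such as tcp_write_xmit() compute the headroom once per pass instead of re-deriving it for every segment. A hypothetical, stand-alone sketch of that caller pattern (illustration only, not part of this patch):

/* Hypothetical user-space sketch: a transmit loop computes the
 * congestion-window quota once and then decrements it per segment
 * instead of recomputing the full send test each time. */
#include <stdio.h>

struct seg { unsigned int len; };

int main(void)
{
	struct seg queue[] = { {1448}, {1448}, {1448}, {1448} };
	unsigned int cwnd = 10, in_flight = 8;
	unsigned int quota = (in_flight < cwnd) ? cwnd - in_flight : 0;
	unsigned int i, n = sizeof(queue) / sizeof(queue[0]);

	for (i = 0; i < n && quota; i++, quota--, in_flight++)
		printf("send segment %u (%u bytes), quota left %u\n",
		       i, queue[i].len, quota - 1);

	printf("stopped with %u segments unsent\n", n - i);
	return 0;
}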