path: root/net/ipv4/tcp_output.c
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--   net/ipv4/tcp_output.c   150
1 file changed, 129 insertions(+), 21 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 389deeb2a457..2cbe879ee16a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -413,6 +413,135 @@ static inline void tcp_tso_set_push(struct sk_buff *skb)
 	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
 }
 
+static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (skb->len <= tp->mss_cache_std ||
+	    !(sk->sk_route_caps & NETIF_F_TSO)) {
+		/* Avoid the costly divide in the normal
+		 * non-TSO case.
+		 */
+		skb_shinfo(skb)->tso_segs = 1;
+		skb_shinfo(skb)->tso_size = 0;
+	} else {
+		unsigned int factor;
+
+		factor = skb->len + (tp->mss_cache_std - 1);
+		factor /= tp->mss_cache_std;
+		skb_shinfo(skb)->tso_segs = factor;
+		skb_shinfo(skb)->tso_size = tp->mss_cache_std;
+	}
+}
+
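
The factor computation just above is the usual round-up integer division
idiom: adding (mss - 1) before dividing yields ceil(len / mss) without
floating point. A minimal standalone sketch of the same arithmetic (the
tso_factor helper is hypothetical, not part of this patch):

	#include <assert.h>

	/* ceil(len / mss) without floating point: how many MSS-sized
	 * segments a payload of len bytes occupies.
	 */
	static unsigned int tso_factor(unsigned int len, unsigned int mss)
	{
		return (len + mss - 1) / mss;
	}

	int main(void)
	{
		assert(tso_factor(1460, 1460) == 1); /* exactly one MSS */
		assert(tso_factor(1461, 1460) == 2); /* one byte spills over */
		assert(tso_factor(4096, 1460) == 3); /* 2.8 rounds up to 3 */
		return 0;
	}
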
+static inline int tcp_minshall_check(const struct tcp_sock *tp)
+{
+	return after(tp->snd_sml, tp->snd_una) &&
+		!after(tp->snd_sml, tp->snd_nxt);
+}
+
+/* Return 0 if the packet can be sent now without violating Nagle's rules:
+ * 1. It is full sized.
+ * 2. Or it contains a FIN.
+ * 3. Or TCP_NODELAY was set.
+ * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
+ *    With Minshall's modification: all sent small packets are ACKed.
+ */
+
+static inline int tcp_nagle_check(const struct tcp_sock *tp,
+				  const struct sk_buff *skb,
+				  unsigned mss_now, int nonagle)
+{
+	return (skb->len < mss_now &&
+		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+		((nonagle & TCP_NAGLE_CORK) ||
+		 (!nonagle &&
+		  tp->packets_out &&
+		  tcp_minshall_check(tp))));
+}
+
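
To see what tcp_minshall_check() buys: classic Nagle holds back any
sub-MSS segment while unacked data is in flight, while Minshall's variant
holds back only while an unacked _small_ segment (snd_sml past snd_una)
is in flight. A userspace model of the combined test, with plain ints
standing in for the kernel state; the harness is illustrative only:

	#include <stdio.h>

	/* Returns nonzero when the segment must be held back, mirroring
	 * tcp_nagle_check() above: small, no FIN, and either corked or
	 * (Nagle enabled, packets out, unacked small segment in flight).
	 */
	static int nagle_holds(unsigned int len, unsigned int mss, int fin,
			       int corked, int nodelay, int packets_out,
			       int small_unacked)
	{
		return len < mss && !fin &&
		       (corked ||
			(!nodelay && packets_out && small_unacked));
	}

	int main(void)
	{
		/* Small write, small segment still unacked: hold (1). */
		printf("%d\n", nagle_holds(100, 1460, 0, 0, 0, 1, 1));
		/* Same write, all small segments ACKed: send now (0). */
		printf("%d\n", nagle_holds(100, 1460, 0, 0, 0, 1, 0));
		return 0;
	}
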
+/* This checks if the data-bearing packet SKB (usually sk->sk_send_head)
+ * should be put on the wire right now.
+ */
+static int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
+			unsigned cur_mss, int nonagle)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int pkts = tcp_skb_pcount(skb);
+
+	if (!pkts) {
+		tcp_set_skb_tso_segs(sk, skb);
+		pkts = tcp_skb_pcount(skb);
+	}
+
+	/* RFC 1122 - section 4.2.3.4
+	 *
+	 * We must queue if
+	 *
+	 * a) The right edge of this frame exceeds the window
+	 * b) There are packets in flight and we have a small segment
+	 *    [SWS avoidance and Nagle algorithm]
+	 *    (part of SWS is done on packetization)
+	 *    The Minshall version reads: there are no _small_
+	 *    segments in flight. (tcp_nagle_check)
+	 * c) We have too many packets 'in flight'
+	 *
+	 * Don't use the Nagle rule for urgent data (or
+	 * for the final FIN -DaveM).
+	 *
+	 * Also, the Nagle rule does not apply to frames which
+	 * sit in the middle of the queue (they have no chance
+	 * to get new data) or when the room at the tail of the
+	 * skb is not enough to save anything serious (<32 for now).
+	 */
+
+	/* Don't be strict about the congestion window for the
+	 * final FIN frame. -DaveM
+	 */
+	return (((nonagle & TCP_NAGLE_PUSH) || tp->urg_mode ||
+		 !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
+		(((tcp_packets_in_flight(tp) + (pkts - 1)) < tp->snd_cwnd) ||
+		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
+		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
+}
+
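
The return expression ANDs three gates: Nagle/push, congestion window,
and receive window. Note that the congestion-window gate charges a TSO
skb for all of its pkts sub-segments up front, requiring
in_flight + (pkts - 1) < snd_cwnd. An arithmetic sketch with invented
numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int in_flight = 8, snd_cwnd = 10;
		unsigned int pkts;

		/* A 2-segment TSO skb fits: 8 + (2 - 1) = 9 < 10. */
		pkts = 2;
		printf("%d\n", (in_flight + (pkts - 1)) < snd_cwnd); /* 1 */

		/* A 3-segment one does not: 8 + (3 - 1) = 10, not < 10. */
		pkts = 3;
		printf("%d\n", (in_flight + (pkts - 1)) < snd_cwnd); /* 0 */
		return 0;
	}
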
+static inline int tcp_skb_is_last(const struct sock *sk,
+				  const struct sk_buff *skb)
+{
+	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
+}
+
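
tcp_skb_is_last() relies on sk_write_queue being a circular doubly-linked
list whose head acts as a sentinel: an skb is last exactly when its next
pointer wraps back to the head. A minimal model of that check (node and
helper names here are hypothetical):

	#include <stdio.h>

	struct node { struct node *next; };

	/* Circular list with a sentinel head: n is last iff its
	 * next pointer wraps back to the head.
	 */
	static int is_last(const struct node *head, const struct node *n)
	{
		return n->next == head;
	}

	int main(void)
	{
		struct node head, a, b;

		head.next = &a;   /* head -> a -> b -> head */
		a.next = &b;
		b.next = &head;
		printf("%d %d\n", is_last(&head, &a), is_last(&head, &b));
		return 0;
	}
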
+/* Push out any pending frames which were held back due to
+ * TCP_CORK or an attempt at coalescing tiny packets.
+ * The socket must be locked by the caller.
+ */
+void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
+			       unsigned cur_mss, int nonagle)
+{
+	struct sk_buff *skb = sk->sk_send_head;
+
+	if (skb) {
+		if (!tcp_skb_is_last(sk, skb))
+			nonagle = TCP_NAGLE_PUSH;
+		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
+		    tcp_write_xmit(sk, nonagle))
+			tcp_check_probe_timer(sk, tp);
+	}
+	tcp_cwnd_validate(sk, tp);
+}
+
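
From userspace, this path is typically exercised by TCP_CORK: setting the
option holds back partial frames, and clearing it pushes whatever is
pending. A sketch of that call sequence (error handling elided; sock is
assumed to be a connected TCP socket):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>
	#include <unistd.h>

	static void send_corked(int sock, const char *hdr, size_t hlen,
				const char *body, size_t blen)
	{
		int on = 1, off = 0;

		/* Cork: queue both writes without sending partial frames. */
		setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
		write(sock, hdr, hlen);
		write(sock, body, blen);

		/* Uncork: the kernel pushes any pending frames. */
		setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
	}
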
+int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
+{
+	struct sk_buff *skb = sk->sk_send_head;
+
+	return (skb &&
+		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
+			     (tcp_skb_is_last(sk, skb) ?
+			      TCP_NAGLE_PUSH :
+			      tp->nonagle)));
+}
+
+
 /* Send _single_ skb sitting at the send head. This function requires
  * true push pending frames to setup probe timer etc.
  */
@@ -434,27 +563,6 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
 	}
 }
 
-void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (skb->len <= tp->mss_cache_std ||
-	    !(sk->sk_route_caps & NETIF_F_TSO)) {
-		/* Avoid the costly divide in the normal
-		 * non-TSO case.
-		 */
-		skb_shinfo(skb)->tso_segs = 1;
-		skb_shinfo(skb)->tso_size = 0;
-	} else {
-		unsigned int factor;
-
-		factor = skb->len + (tp->mss_cache_std - 1);
-		factor /= tp->mss_cache_std;
-		skb_shinfo(skb)->tso_segs = factor;
-		skb_shinfo(skb)->tso_size = tp->mss_cache_std;
-	}
-}
-
 /* Function to create two new TCP segments. Shrinks the given segment
  * to the specified size and appends a new segment with the rest of the
  * packet to the list. This won't be called frequently, I hope.