author     Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>    2007-12-31 07:51:11 -0500
committer  David S. Miller <davem@davemloft.net>        2008-01-28 18:00:24 -0500
commit     058dc3342b71ffb3531c4f9df7c35f943f392b8d (patch)
tree       bea082c0dec704e6f2d05e49fad987eab96a9c23 /net
parent     4828e7f49a402930e8b3e72de695c8d37e0f98ee (diff)
[TCP]: reduce tcp_output's indentation levels a bit
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--    net/ipv4/tcp_output.c    239
1 file changed, 121 insertions(+), 118 deletions(-)
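The whole patch is one mechanical transformation: invert the guarding condition at the top of a function, bail out with an early return, and pull the main body back out by one or two indentation levels, without changing behaviour. As a rough stand-alone illustration of that early-return style (this is not code from tcp_output.c; the struct and function names below are made up for the example):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical example type, not a kernel structure. */
struct pkt {
        size_t len;
        bool   cloned;
};

/* Before: everything nests inside the guarding "if". */
static int pkt_send_nested(const struct pkt *p, size_t mss)
{
        int err = -1;

        if (p != NULL && !p->cloned) {
                if (p->len <= mss) {
                        printf("sending %zu bytes\n", p->len);
                        err = 0;
                }
        }
        return err;
}

/* After: each precondition punts with an early return, so the
 * interesting code sits at a single indentation level.  This is the
 * same shape the patch gives tcp_retrans_try_collapse(), tcp_send_ack()
 * and tcp_write_wakeup(). */
static int pkt_send_flat(const struct pkt *p, size_t mss)
{
        if (p == NULL || p->cloned)
                return -1;

        if (p->len > mss)
                return -1;

        printf("sending %zu bytes\n", p->len);
        return 0;
}

int main(void)
{
        struct pkt p = { .len = 512, .cloned = false };

        /* Both variants take the same decisions; only the shape differs. */
        return pkt_send_nested(&p, 1460) || pkt_send_flat(&p, 1460);
}

The actual diff, which applies exactly this reshuffle to three functions, follows.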
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cd21528665f3..454cf84b6154 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1673,75 +1673,77 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
+        int skb_size, next_skb_size;
+        u16 flags;

         /* The first test we must make is that neither of these two
          * SKB's are still referenced by someone else.
          */
-        if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
-                int skb_size = skb->len, next_skb_size = next_skb->len;
-                u16 flags = TCP_SKB_CB(skb)->flags;
+        if (skb_cloned(skb) || skb_cloned(next_skb))
+                return;

-                /* Also punt if next skb has been SACK'd. */
-                if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
-                        return;
+        skb_size = skb->len;
+        next_skb_size = next_skb->len;
+        flags = TCP_SKB_CB(skb)->flags;

-                /* Next skb is out of window. */
-                if (after(TCP_SKB_CB(next_skb)->end_seq, tcp_wnd_end(tp)))
-                        return;
+        /* Also punt if next skb has been SACK'd. */
+        if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
+                return;

-                /* Punt if not enough space exists in the first SKB for
-                 * the data in the second, or the total combined payload
-                 * would exceed the MSS.
-                 */
-                if ((next_skb_size > skb_tailroom(skb)) ||
-                    ((skb_size + next_skb_size) > mss_now))
-                        return;
+        /* Next skb is out of window. */
+        if (after(TCP_SKB_CB(next_skb)->end_seq, tcp_wnd_end(tp)))
+                return;

-                BUG_ON(tcp_skb_pcount(skb) != 1 ||
-                       tcp_skb_pcount(next_skb) != 1);
+        /* Punt if not enough space exists in the first SKB for
+         * the data in the second, or the total combined payload
+         * would exceed the MSS.
+         */
+        if ((next_skb_size > skb_tailroom(skb)) ||
+            ((skb_size + next_skb_size) > mss_now))
+                return;

-                tcp_highest_sack_combine(sk, next_skb, skb);
+        BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);

-                /* Ok. We will be able to collapse the packet. */
-                tcp_unlink_write_queue(next_skb, sk);
+        tcp_highest_sack_combine(sk, next_skb, skb);

-                skb_copy_from_linear_data(next_skb,
-                                          skb_put(skb, next_skb_size),
-                                          next_skb_size);
+        /* Ok. We will be able to collapse the packet. */
+        tcp_unlink_write_queue(next_skb, sk);

-                if (next_skb->ip_summed == CHECKSUM_PARTIAL)
-                        skb->ip_summed = CHECKSUM_PARTIAL;
+        skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
+                                  next_skb_size);

-                if (skb->ip_summed != CHECKSUM_PARTIAL)
-                        skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
+        if (next_skb->ip_summed == CHECKSUM_PARTIAL)
+                skb->ip_summed = CHECKSUM_PARTIAL;

-                /* Update sequence range on original skb. */
-                TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
+        if (skb->ip_summed != CHECKSUM_PARTIAL)
+                skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);

-                /* Merge over control information. */
-                flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
-                TCP_SKB_CB(skb)->flags = flags;
+        /* Update sequence range on original skb. */
+        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;

-                /* All done, get rid of second SKB and account for it so
-                 * packet counting does not break.
-                 */
-                TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
-                if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
-                        tp->retrans_out -= tcp_skb_pcount(next_skb);
-                if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
-                        tp->lost_out -= tcp_skb_pcount(next_skb);
-                /* Reno case is special. Sigh... */
-                if (tcp_is_reno(tp) && tp->sacked_out)
-                        tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
-
-                tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb));
-                tp->packets_out -= tcp_skb_pcount(next_skb);
-
-                /* changed transmit queue under us so clear hints */
-                tcp_clear_retrans_hints_partial(tp);
-
-                sk_wmem_free_skb(sk, next_skb);
-        }
+        /* Merge over control information. */
+        flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
+        TCP_SKB_CB(skb)->flags = flags;
+
+        /* All done, get rid of second SKB and account for it so
+         * packet counting does not break.
+         */
+        TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
+        if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_RETRANS)
+                tp->retrans_out -= tcp_skb_pcount(next_skb);
+        if (TCP_SKB_CB(next_skb)->sacked & TCPCB_LOST)
+                tp->lost_out -= tcp_skb_pcount(next_skb);
+        /* Reno case is special. Sigh... */
+        if (tcp_is_reno(tp) && tp->sacked_out)
+                tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
+
+        tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb));
+        tp->packets_out -= tcp_skb_pcount(next_skb);
+
+        /* changed transmit queue under us so clear hints */
+        tcp_clear_retrans_hints_partial(tp);
+
+        sk_wmem_free_skb(sk, next_skb);
 }

 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2416,37 +2418,38 @@ void tcp_send_delayed_ack(struct sock *sk)
 /* This routine sends an ack and also updates the window. */
 void tcp_send_ack(struct sock *sk)
 {
-        /* If we have been reset, we may not send again. */
-        if (sk->sk_state != TCP_CLOSE) {
-                struct sk_buff *buff;
+        struct sk_buff *buff;

-                /* We are not putting this on the write queue, so
-                 * tcp_transmit_skb() will set the ownership to this
-                 * sock.
-                 */
-                buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
-                if (buff == NULL) {
-                        inet_csk_schedule_ack(sk);
-                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
-                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-                                                  TCP_DELACK_MAX, TCP_RTO_MAX);
-                        return;
-                }
+        /* If we have been reset, we may not send again. */
+        if (sk->sk_state == TCP_CLOSE)
+                return;

-                /* Reserve space for headers and prepare control bits. */
-                skb_reserve(buff, MAX_TCP_HEADER);
-                buff->csum = 0;
-                TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
-                TCP_SKB_CB(buff)->sacked = 0;
-                skb_shinfo(buff)->gso_segs = 1;
-                skb_shinfo(buff)->gso_size = 0;
-                skb_shinfo(buff)->gso_type = 0;
-
-                /* Send it off, this clears delayed acks for us. */
-                TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
-                TCP_SKB_CB(buff)->when = tcp_time_stamp;
-                tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
+        /* We are not putting this on the write queue, so
+         * tcp_transmit_skb() will set the ownership to this
+         * sock.
+         */
+        buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+        if (buff == NULL) {
+                inet_csk_schedule_ack(sk);
+                inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
+                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+                                          TCP_DELACK_MAX, TCP_RTO_MAX);
+                return;
         }
+
+        /* Reserve space for headers and prepare control bits. */
+        skb_reserve(buff, MAX_TCP_HEADER);
+        buff->csum = 0;
+        TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
+        TCP_SKB_CB(buff)->sacked = 0;
+        skb_shinfo(buff)->gso_segs = 1;
+        skb_shinfo(buff)->gso_size = 0;
+        skb_shinfo(buff)->gso_type = 0;
+
+        /* Send it off, this clears delayed acks for us. */
+        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
+        TCP_SKB_CB(buff)->when = tcp_time_stamp;
+        tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
 }

 /* This routine sends a packet with an out of date sequence
@@ -2491,46 +2494,46 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)

 int tcp_write_wakeup(struct sock *sk)
 {
-        if (sk->sk_state != TCP_CLOSE) {
-                struct tcp_sock *tp = tcp_sk(sk);
-                struct sk_buff *skb;
-
-                if ((skb = tcp_send_head(sk)) != NULL &&
-                    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
-                        int err;
-                        unsigned int mss = tcp_current_mss(sk, 0);
-                        unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-
-                        if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
-                                tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
-
-                        /* We are probing the opening of a window
-                         * but the window size is != 0
-                         * must have been a result SWS avoidance ( sender )
-                         */
-                        if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
-                            skb->len > mss) {
-                                seg_size = min(seg_size, mss);
-                                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
-                                if (tcp_fragment(sk, skb, seg_size, mss))
-                                        return -1;
-                        } else if (!tcp_skb_pcount(skb))
-                                tcp_set_skb_tso_segs(sk, skb, mss);
+        struct tcp_sock *tp = tcp_sk(sk);
+        struct sk_buff *skb;

+        if (sk->sk_state == TCP_CLOSE)
+                return -1;
+
+        if ((skb = tcp_send_head(sk)) != NULL &&
+            before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
+                int err;
+                unsigned int mss = tcp_current_mss(sk, 0);
+                unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
+
+                if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
+                        tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
+
+                /* We are probing the opening of a window
+                 * but the window size is != 0
+                 * must have been a result SWS avoidance ( sender )
+                 */
+                if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
+                    skb->len > mss) {
+                        seg_size = min(seg_size, mss);
                         TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
-                        TCP_SKB_CB(skb)->when = tcp_time_stamp;
-                        err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
-                        if (!err)
-                                tcp_event_new_data_sent(sk, skb);
-                        return err;
-                } else {
-                        if (tp->urg_mode &&
-                            between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
-                                tcp_xmit_probe_skb(sk, 1);
-                        return tcp_xmit_probe_skb(sk, 0);
-                }
+                        if (tcp_fragment(sk, skb, seg_size, mss))
+                                return -1;
+                } else if (!tcp_skb_pcount(skb))
+                        tcp_set_skb_tso_segs(sk, skb, mss);
+
+                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+                TCP_SKB_CB(skb)->when = tcp_time_stamp;
+                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+                if (!err)
+                        tcp_event_new_data_sent(sk, skb);
+                return err;
+        } else {
+                if (tp->urg_mode &&
+                    between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
+                        tcp_xmit_probe_skb(sk, 1);
+                return tcp_xmit_probe_skb(sk, 0);
         }
-        return -1;
 }

 /* A window probe timeout has occurred. If window is not closed send