Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	62
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 58b7111523f4..cebe9aa918a3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -198,7 +198,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
 	(*rcv_wscale) = 0;
 	if (wscale_ok) {
 		/* Set window scaling on max possible window
 		 * See RFC1323 for an explanation of the limit to 14
 		 */
 		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
 		space = min_t(u32, space, *window_clamp);
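The limit of 14 mentioned in the comment comes from RFC 1323: the window-scale shift may not exceed 14, keeping the scaled window below 2^30. A minimal userspace sketch of the scaling loop that follows the clamp above (the loop and names illustrate what tcp_select_initial_window does; they are not copied from this diff):

#include <stdio.h>

/* Sketch: pick the smallest shift, capped at 14 per RFC 1323, that lets
 * the clamped buffer space fit into the 16-bit window field. */
static unsigned int pick_rcv_wscale(unsigned int space)
{
	unsigned int wscale = 0;

	while (space > 65535U && wscale < 14)
		space >>= 1, wscale++;
	return wscale;
}

int main(void)
{
	/* A 4 MB receive buffer needs a shift of 7: 4194304 >> 7 = 32768. */
	printf("wscale for 4MB: %u\n", pick_rcv_wscale(4 * 1024 * 1024));
	return 0;
}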
@@ -451,7 +451,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 			       (tp->rx_opt.eff_sacks *
 				TCPOLEN_SACK_PERBLOCK));
 	}

 	if (tcp_packets_in_flight(tp) == 0)
 		tcp_ca_event(sk, CA_EVENT_TX_START);

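For context on the sizing arithmetic in this hunk: each SACK block advertises one contiguous received range as two 32-bit sequence numbers, so it costs 8 bytes (the kernel's TCPOLEN_SACK_PERBLOCK), on top of a 4-byte aligned kind/length header. A sketch of that accounting, as a standalone program:

#include <stdio.h>

#define TCPOLEN_SACK_BASE_ALIGNED 4U  /* NOP, NOP, kind, length */
#define TCPOLEN_SACK_PERBLOCK     8U  /* left edge + right edge, 4 bytes each */

static unsigned int sack_option_bytes(unsigned int eff_sacks)
{
	return TCPOLEN_SACK_BASE_ALIGNED + eff_sacks * TCPOLEN_SACK_PERBLOCK;
}

int main(void)
{
	for (unsigned int n = 1; n <= 4; n++)
		printf("%u SACK block(s): %u option bytes\n",
		       n, sack_option_bytes(n));
	return 0;
}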
@@ -555,7 +555,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 }


 /* This routine just queues the buffer
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
  * otherwise socket can stall.
@@ -597,7 +597,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned

 /* Function to create two new TCP segments. Shrinks the given segment
  * to the specified size and appends a new segment with the rest of the
  * packet to the list. This won't be called frequently, I hope.
  * Remember, these are still headerless SKBs at this point.
  */
 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
@@ -610,7 +610,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss

 	BUG_ON(len > skb->len);

 	clear_all_retrans_hints(tp);
 	nsize = skb_headlen(skb) - len;
 	if (nsize < 0)
 		nsize = 0;
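tcp_fragment's job, per the comment above, is a split in place: trim the original skb to len bytes and queue a new skb holding the remainder right behind it. A self-contained sketch of that shape, with illustrative types rather than real skb internals:

#include <stdlib.h>
#include <string.h>

struct seg {
	unsigned char *data;
	size_t len;
	struct seg *next;
};

/* Shrink *head to `len` bytes and insert a new segment with the tail. */
static int seg_split(struct seg *head, size_t len)
{
	struct seg *tail;

	if (len > head->len)
		return -1;               /* mirrors the BUG_ON(len > skb->len) */

	tail = malloc(sizeof(*tail));
	if (!tail)
		return -1;               /* tcp_fragment returns -ENOMEM here */
	tail->len = head->len - len;
	tail->data = malloc(tail->len);
	if (!tail->data) {
		free(tail);
		return -1;
	}
	memcpy(tail->data, head->data + len, tail->len);

	head->len = len;
	tail->next = head->next;         /* new segment follows the old one */
	head->next = tail;
	return 0;
}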
@@ -821,7 +821,7 @@ void tcp_mtup_init(struct sock *sk)

 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
 				      icsk->icsk_af_ops->net_header_len;
 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
 	icsk->icsk_mtup.probe_size = 0;
 }
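The search_high/search_low bounds are plain MSS-to-MTU arithmetic, which tcp_mss_to_mtu performs in the kernel. A sketch assuming IPv4 with no options, where net_header_len is 20 bytes and the TCP header another 20 (the base MSS of 512 below is the historical sysctl_tcp_base_mss default, an assumption):

#include <stdio.h>

enum { IPV4_HDR = 20, TCP_HDR = 20 };

static unsigned int mss_to_mtu(unsigned int mss)
{
	return mss + IPV4_HDR + TCP_HDR;   /* no IP/TCP options assumed */
}

int main(void)
{
	/* search_low would start probing at an MTU of 552 ... */
	printf("base mss 512  -> mtu %u\n", mss_to_mtu(512));
	/* ... while an mss_clamp of 1460 puts search_high at Ethernet's 1500. */
	printf("mss_clamp 1460 -> mtu %u\n", mss_to_mtu(1460));
	return 0;
}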
@@ -1008,7 +1008,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp)
  */

 static inline int tcp_nagle_check(const struct tcp_sock *tp,
 				  const struct sk_buff *skb,
 				  unsigned mss_now, int nonagle)
 {
 	return (skb->len < mss_now &&
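The return expression that starts here implements the Nagle test (RFC 896) with Minshall's refinement. A boolean sketch of the decision; names are illustrative, and the real function also consults tp->packets_out and tcp_minshall_check:

#include <stdbool.h>

static bool nagle_defers(unsigned int seg_len, unsigned int mss,
			 bool fin, bool nagle_off, bool unacked_small_seg)
{
	if (seg_len >= mss || fin)   /* full segments and FIN always go */
		return false;
	if (nagle_off)               /* TCP_NODELAY disables the delay */
		return false;
	/* Minshall: hold back only while a small segment is still in flight. */
	return unacked_small_seg;
}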
@@ -1078,7 +1078,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
 	return cwnd_quota;
 }

 static inline int tcp_skb_is_last(const struct sock *sk,
 				  const struct sk_buff *skb)
 {
 	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
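The comparison above works because sk_write_queue is a circular doubly-linked list whose head is embedded in the socket and acts as a sentinel: an skb is last exactly when its next pointer wraps back to that head. A generic equivalent:

struct node {
	struct node *next, *prev;
};

static inline int node_is_last(const struct node *head, const struct node *n)
{
	return n->next == head;   /* next wraps around to the sentinel head */
}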
@@ -1298,7 +1298,7 @@ static int tcp_mtu_probe(struct sock *sk)
 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
 		else
 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
 					 skb_put(nskb, copy), copy, nskb->csum);

 		if (skb->len <= copy) {
 			/* We've eaten all the data from this skb.
@@ -1308,7 +1308,7 @@ static int tcp_mtu_probe(struct sock *sk)
 			sk_stream_free_skb(sk, skb);
 		} else {
 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
 						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
 			if (!skb_shinfo(skb)->nr_frags) {
 				skb_pull(skb, copy);
 				if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1501,7 +1501,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)

 /* This function returns the amount that we can raise the
  * usable window based on the following constraints
  *
  * 1. The window can never be shrunk once it is offered (RFC 793)
  * 2. We limit memory per socket
  *
@@ -1520,12 +1520,12 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
  * side SWS prevention criteria. The problem is that under this rule
  * a stream of single byte packets will cause the right side of the
  * window to always advance by a single byte.
  *
  * Of course, if the sender implements sender side SWS prevention
  * then this will not be a problem.
  *
  * BSD seems to make the following compromise:
  *
  *	If the free space is less than 1/4 of the maximum
  *	space available and the free space is less than 1/2 mss,
  *	then set the window to 0.
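A direct translation of the BSD compromise quoted above, as a sketch; __tcp_select_window layers further rounding and scaling on top of this, so this is only the comment's rule, not the full function:

static unsigned int bsd_window(unsigned int free_space,
			       unsigned int max_space, unsigned int mss)
{
	/* Receiver-side SWS avoidance: too little room to be worth offering. */
	if (free_space < max_space / 4 && free_space < mss / 2)
		return 0;
	return free_space;
}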
@@ -1567,7 +1567,7 @@ u32 __tcp_select_window(struct sock *sk)
 	int window;

 	if (mss > full_space)
 		mss = full_space;

 	if (free_space < full_space/2) {
 		icsk->icsk_ack.quick = 0;
@@ -1691,9 +1691,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 }

 /* Do a simple retransmit without using the backoff mechanisms in
  * tcp_timer. This is used for path mtu discovery.
  * The socket is already locked here.
  */
 void tcp_simple_retransmit(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1703,7 +1703,7 @@ void tcp_simple_retransmit(struct sock *sk)
 	int lost = 0;

 	sk_stream_for_retrans_queue(skb, sk) {
 		if (skb->len > mss &&
 		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
 			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
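The predicate in this loop is worth stating on its own: once path MTU discovery has lowered the MSS, any queued segment longer than the new MSS that has not been SACKed is presumed dropped as oversized and must be retransmitted. As a sketch:

#include <stdbool.h>

static bool needs_simple_retransmit(unsigned int seg_len,
				    unsigned int new_mss, bool sacked_acked)
{
	/* Too big for the shrunken path and never acknowledged: mark lost. */
	return seg_len > new_mss && !sacked_acked;
}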
@@ -1724,7 +1724,7 @@ void tcp_simple_retransmit(struct sock *sk)

 	tcp_sync_left_out(tp);

 	/* Don't muck with the congestion window here.
 	 * Reason is that we do not increase amount of _data_
 	 * in network, but units changed and effective
 	 * cwnd/ssthresh really reduced now.
@@ -1747,7 +1747,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int cur_mss = tcp_current_mss(sk, 0);
 	int err;

 	/* Inconclusive MTU probe */
@@ -1984,10 +1984,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
  */
 void tcp_send_fin(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
 	int mss_now;

 	/* Optimization, tack on the FIN if we have a queue of
 	 * unsent frames. But be careful about outgoing SACKS
 	 * and IP options.
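The optimization the comment describes avoids allocating a dedicated FIN segment: when unsent frames are already queued, the FIN flag is set on the tail skb and its end sequence is bumped by one, since FIN consumes a sequence number. A sketch with illustrative types, not the kernel's:

struct tail_seg {
	unsigned int flags;      /* segment control flags */
	unsigned int end_seq;    /* sequence number after the last byte */
};

#define FLAG_FIN 0x01u           /* illustrative stand-in for TCPCB_FLAG_FIN */

static void tack_on_fin(struct tail_seg *tail)
{
	tail->flags |= FLAG_FIN;
	tail->end_seq += 1;      /* FIN occupies one sequence number */
}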
@@ -2146,17 +2146,17 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
 		__u8 rcv_wscale;
 		/* Set this up on the first call only */
 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
 		/* tcp_full_space because it is guaranteed to be the first packet */
 		tcp_select_initial_window(tcp_full_space(sk),
 			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
 			&req->rcv_wnd,
 			&req->window_clamp,
 			ireq->wscale_ok,
 			&rcv_wscale);
 		ireq->rcv_wscale = rcv_wscale;
 	}

 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
@@ -2192,9 +2192,9 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	return skb;
 }

 /*
  * Do all connect socket setups that can be done AF independent.
  */
 static void tcp_connect_init(struct sock *sk)
 {
 	struct dst_entry *dst = __sk_dst_get(sk);
@@ -2251,7 +2251,7 @@ static void tcp_connect_init(struct sock *sk)

 /*
  * Build a SYN and send it off.
  */
 int tcp_connect(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2409,7 +2409,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)

 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
 	if (skb == NULL)
 		return -1;

 	/* Reserve space for headers and set control bits. */
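Allocating MAX_TCP_HEADER up front and then reserving it is the standard headroom pattern: each layer later pushes its header backwards into the reserved area instead of reallocating. A generic sketch of the pattern (buf_* names are illustrative, not kernel API):

#include <stdlib.h>

struct buf {
	unsigned char *head, *data;
};

static struct buf *buf_alloc(size_t headroom)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;            /* caller bails out, as with -1 above */
	b->head = malloc(headroom);
	if (!b->head) {
		free(b);
		return NULL;
	}
	b->data = b->head + headroom;   /* like skb_reserve(skb, MAX_TCP_HEADER) */
	return b;
}

static unsigned char *buf_push(struct buf *b, size_t hdr_len)
{
	b->data -= hdr_len;             /* like skb_push() for each header */
	return b->data;
}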
@@ -2498,7 +2498,7 @@ void tcp_send_probe0(struct sock *sk)
 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
 			icsk->icsk_backoff++;
 		icsk->icsk_probes_out++;
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
 					  TCP_RTO_MAX);
 	} else {
@@ -2510,7 +2510,7 @@ void tcp_send_probe0(struct sock *sk)
 		 */
 		if (!icsk->icsk_probes_out)
 			icsk->icsk_probes_out = 1;
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 					  min(icsk->icsk_rto << icsk->icsk_backoff,
 					      TCP_RESOURCE_PROBE_INTERVAL),
 					  TCP_RTO_MAX);
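The timer arithmetic in these last two hunks is the zero-window probe backoff: the interval doubles via rto << backoff until it saturates at TCP_RTO_MAX (120 seconds in the kernel). A sketch in milliseconds, assuming HZ=1000 purely for illustration:

#include <stdio.h>

#define TCP_RTO_MAX_MS (120 * 1000)   /* 120 s, assuming HZ=1000 */

static unsigned int probe0_timeout_ms(unsigned int rto, unsigned int backoff)
{
	unsigned long t = (unsigned long)rto << backoff;

	/* min(rto << backoff, TCP_RTO_MAX), as in the hunks above */
	return t < TCP_RTO_MAX_MS ? (unsigned int)t : TCP_RTO_MAX_MS;
}

int main(void)
{
	/* With a 200 ms RTO the schedule runs 200, 400, 800, ... then caps. */
	for (unsigned int b = 0; b < 12; b++)
		printf("backoff %2u -> %u ms\n", b, probe0_timeout_ms(200, b));
	return 0;
}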