author	Eric Dumazet <edumazet@google.com>	2012-05-16 19:15:34 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-17 14:59:59 -0400
commit	a2a385d627e1549da4b43a8b3dfe370589766e1c (patch)
tree	d61e9913497c6c14406032f6a0822738707f1abf /net/ipv4/tcp_output.c
parent	e005d193d55ee5f757b13306112d8c23aac27a88 (diff)

tcp: bool conversions

bool conversions where possible.

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	75
1 file changed, 38 insertions(+), 37 deletions(-)
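
The pattern applied throughout is mechanical: helper predicates that used to return 0/1 as int now return false/true as bool, and __inline__ spellings become inline. A minimal standalone sketch of the before/after shape (illustrative parameters, not the kernel's struct tcp_sock):

    #include <stdbool.h>

    /* Before: truth value encoded as an int 0/1. */
    static inline int urg_mode_old(unsigned int snd_una, unsigned int snd_up)
    {
            return snd_una != snd_up;
    }

    /* After: the return type itself documents that this is a predicate. */
    static inline bool urg_mode_new(unsigned int snd_una, unsigned int snd_up)
    {
            return snd_una != snd_up;
    }

The generated code is the same either way; the bool return type only makes the intent explicit at call sites.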
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1a630825c45b..803cbfe82fbc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -370,7 +370,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 	TCP_SKB_CB(skb)->end_seq = seq;
 }
 
-static inline int tcp_urg_mode(const struct tcp_sock *tp)
+static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 {
 	return tp->snd_una != tp->snd_up;
 }
@@ -1391,20 +1391,20 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
 }
 
 /* Minshall's variant of the Nagle send check. */
-static inline int tcp_minshall_check(const struct tcp_sock *tp)
+static inline bool tcp_minshall_check(const struct tcp_sock *tp)
 {
 	return after(tp->snd_sml, tp->snd_una) &&
 	       !after(tp->snd_sml, tp->snd_nxt);
 }
 
-/* Return 0, if packet can be sent now without violation Nagle's rules:
+/* Return false, if packet can be sent now without violation Nagle's rules:
  * 1. It is full sized.
  * 2. Or it contains FIN. (already checked by caller)
  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
  *    With Minshall's modification: all sent small packets are ACKed.
  */
-static inline int tcp_nagle_check(const struct tcp_sock *tp,
+static inline bool tcp_nagle_check(const struct tcp_sock *tp,
 				  const struct sk_buff *skb,
 				  unsigned int mss_now, int nonagle)
 {
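
tcp_minshall_check above relies on the kernel's after() macro, a wraparound-safe ordering test on 32-bit TCP sequence numbers. A minimal equivalent, shown for illustration only (seq_after is a hypothetical name):

    #include <stdbool.h>
    #include <stdint.h>

    /* true if sequence number a comes after b, modulo 2^32 */
    static inline bool seq_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(b - a) < 0;    /* signed difference absorbs wraparound */
    }
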
@@ -1413,11 +1413,11 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
 		(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
 }
 
-/* Return non-zero if the Nagle test allows this packet to be
+/* Return true if the Nagle test allows this packet to be
  * sent now.
  */
-static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
-				 unsigned int cur_mss, int nonagle)
+static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+				  unsigned int cur_mss, int nonagle)
 {
 	/* Nagle rule does not apply to frames, which sit in the middle of the
 	 * write_queue (they have no chances to get new data).
@@ -1426,24 +1426,25 @@ static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff
 	 * argument based upon the location of SKB in the send queue.
 	 */
 	if (nonagle & TCP_NAGLE_PUSH)
-		return 1;
+		return true;
 
 	/* Don't use the nagle rule for urgent data (or for the final FIN).
 	 * Nagle can be ignored during F-RTO too (see RFC4138).
 	 */
 	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
 	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
-		return 1;
+		return true;
 
 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 /* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
-				   unsigned int cur_mss)
+static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
+			     const struct sk_buff *skb,
+			     unsigned int cur_mss)
 {
 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
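
Taken together, the rules in the tcp_nagle_check comment say a segment is held back only when it is smaller than the MSS and either TCP_CORK is set or earlier small data is still unacknowledged (Minshall's variant). A condensed sketch of that decision, with flattened illustrative parameters rather than the kernel's tcp_sock fields:

    #include <stdbool.h>

    /* true means: defer this small segment (Nagle applies) */
    static bool nagle_defers(unsigned int len, unsigned int mss_now,
                             bool cork, bool nodelay, bool small_unacked)
    {
            if (len >= mss_now)     /* rule 1: full-sized segments always go */
                    return false;
            if (cork)               /* TCP_CORK holds everything back */
                    return true;
            if (nodelay)            /* rule 3: TCP_NODELAY disables Nagle */
                    return false;
            return small_unacked;   /* rule 4 with Minshall's modification */
    }
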
@@ -1476,7 +1477,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
 }
 
 /* Test if sending is allowed right now. */
-int tcp_may_send_now(struct sock *sk)
+bool tcp_may_send_now(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
@@ -1546,7 +1547,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
  *
  * This algorithm is from John Heffner.
  */
-static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1606,11 +1607,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	/* Ok, it looks like it is advisable to defer. */
 	tp->tso_deferred = 1 | (jiffies << 1);
 
-	return 1;
+	return true;
 
 send_now:
 	tp->tso_deferred = 0;
-	return 0;
+	return false;
 }
 
 /* Create a new MTU probe if we are ready.
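
The tp->tso_deferred assignment in the hunk above packs two facts into one word: bit 0 records that a deferral is in progress (so the value is never zero), and the remaining bits carry a truncated jiffies timestamp. A sketch of that encoding with hypothetical helper names:

    /* mark: flag in bit 0 guarantees a nonzero value, time in bits 1.. */
    static inline unsigned long defer_mark(unsigned long now_jiffies)
    {
            return 1UL | (now_jiffies << 1);
    }

    /* recover the (truncated) timestamp; the top bit of jiffies is lost */
    static inline unsigned long defer_when(unsigned long marked)
    {
            return marked >> 1;
    }
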
@@ -1752,11 +1753,11 @@ static int tcp_mtu_probe(struct sock *sk)
  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
  * account rare use of URG, this is not a big flaw.
  *
- * Returns 1, if no segments are in flight and we have queued segments, but
- * cannot send anything now because of SWS or another problem.
+ * Returns true, if no segments are in flight and we have queued segments,
+ * but cannot send anything now because of SWS or another problem.
  */
-static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
-			  int push_one, gfp_t gfp)
+static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+			   int push_one, gfp_t gfp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
@@ -1770,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	/* Do MTU probing. */
 	result = tcp_mtu_probe(sk);
 	if (!result) {
-		return 0;
+		return false;
 	} else if (result > 0) {
 		sent_pkts = 1;
 	}
@@ -1829,7 +1830,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
 	if (likely(sent_pkts)) {
 		tcp_cwnd_validate(sk);
-		return 0;
+		return false;
 	}
 	return !tp->packets_out && tcp_send_head(sk);
 }
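
Note the final return, which needed no edit: !tp->packets_out && tcp_send_head(sk) combines a negated counter with a pointer, and the && operator already yields a clean 0/1 that converts to bool exactly as it converted to int. A small illustration with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>

    /* true when data is queued but nothing is in flight */
    static bool queued_but_stalled(unsigned int packets_out, const void *send_head)
    {
            return !packets_out && send_head != NULL;
    }
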
@@ -2028,22 +2029,22 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
+static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
 {
 	if (tcp_skb_pcount(skb) > 1)
-		return 0;
+		return false;
 	/* TODO: SACK collapsing could be used to remove this condition */
 	if (skb_shinfo(skb)->nr_frags != 0)
-		return 0;
+		return false;
 	if (skb_cloned(skb))
-		return 0;
+		return false;
 	if (skb == tcp_send_head(sk))
-		return 0;
+		return false;
 	/* Some heurestics for collapsing over SACK'd could be invented */
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /* Collapse packets in the retransmit queue to make to create
@@ -2054,7 +2055,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = to, *tmp;
-	int first = 1;
+	bool first = true;
 
 	if (!sysctl_tcp_retrans_collapse)
 		return;
@@ -2068,7 +2069,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 		space -= skb->len;
 
 		if (first) {
-			first = 0;
+			first = false;
 			continue;
 		}
 
@@ -2208,18 +2209,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 /* Check if we forward retransmits are possible in the current
  * window/congestion state.
  */
-static int tcp_can_forward_retransmit(struct sock *sk)
+static bool tcp_can_forward_retransmit(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Forward retransmissions are possible only during Recovery. */
 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
-		return 0;
+		return false;
 
 	/* No forward retransmissions in Reno are possible. */
 	if (tcp_is_reno(tp))
-		return 0;
+		return false;
 
 	/* Yeah, we have to make difficult choice between forward transmission
 	 * and retransmission... Both ways have their merits...
@@ -2230,9 +2231,9 @@ static int tcp_can_forward_retransmit(struct sock *sk)
 	 */
 
 	if (tcp_may_send_now(sk))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /* This gets called after a retransmit timeout, and the initially