author    Eric Dumazet <edumazet@google.com>  2012-05-16 19:15:34 -0400
committer David S. Miller <davem@davemloft.net>  2012-05-17 14:59:59 -0400
commit    a2a385d627e1549da4b43a8b3dfe370589766e1c (patch)
tree      d61e9913497c6c14406032f6a0822738707f1abf /net/ipv4/tcp_input.c
parent    e005d193d55ee5f757b13306112d8c23aac27a88 (diff)
tcp: bool conversions
bool conversions where possible.

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 214
 1 file changed, 108 insertions(+), 106 deletions(-)
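Before reading the diff, here is a minimal, self-contained sketch of the conversion pattern the patch applies throughout tcp_input.c: predicates that returned int 0/1 now return bool true/false, and the old __inline__ spelling becomes plain inline. The struct conn type and the in_quickack_mode_* helpers below are hypothetical stand-ins loosely modeled on tcp_in_quickack_mode(); they are illustrative only and not code from this patch.

/* sketch only -- not kernel code */
#include <stdbool.h>
#include <stdio.h>

struct conn {
	int quick;    /* scheduled quick acks remaining */
	int pingpong; /* interactive-session heuristic */
};

/* Before: GNU __inline__ keyword and int used as a boolean. */
static __inline__ int in_quickack_mode_old(const struct conn *c)
{
	if (c->quick && !c->pingpong)
		return 1;
	return 0;
}

/* After: standard inline, bool return type, true/false literals. */
static inline bool in_quickack_mode_new(const struct conn *c)
{
	return c->quick && !c->pingpong;
}

int main(void)
{
	struct conn c = { .quick = 2, .pingpong = 0 };

	/* Callers need no change: bool converts to int in expressions. */
	printf("old=%d new=%d\n",
	       in_quickack_mode_old(&c), in_quickack_mode_new(&c));
	return 0;
}

Because C guarantees that a bool converts to 0 or 1 in integer contexts, existing callers that test or store the result keep working unchanged; the gain is documentation of intent and slightly better type checking.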
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb97787be75..b961ef54b17 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -196,9 +196,10 @@ static void tcp_enter_quickack_mode(struct sock *sk)
  * and the session is not interactive.
  */
 
-static inline int tcp_in_quickack_mode(const struct sock *sk)
+static inline bool tcp_in_quickack_mode(const struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+
 	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
 }
 
@@ -253,11 +254,11 @@ static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
 	tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
-static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 /* Buffer size and advertised window tuning.
@@ -1123,36 +1124,36 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
  * the exact amount is rather hard to quantify. However, tp->max_window can
  * be used as an exaggerated estimate.
  */
-static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
+static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
 				  u32 start_seq, u32 end_seq)
 {
 	/* Too far in future, or reversed (interpretation is ambiguous) */
 	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
-		return 0;
+		return false;
 
 	/* Nasty start_seq wrap-around check (see comments above) */
 	if (!before(start_seq, tp->snd_nxt))
-		return 0;
+		return false;
 
 	/* In outstanding window? ...This is valid exit for D-SACKs too.
 	 * start_seq == snd_una is non-sensical (see comments above)
 	 */
 	if (after(start_seq, tp->snd_una))
-		return 1;
+		return true;
 
 	if (!is_dsack || !tp->undo_marker)
-		return 0;
+		return false;
 
 	/* ...Then it's D-SACK, and must reside below snd_una completely */
 	if (after(end_seq, tp->snd_una))
-		return 0;
+		return false;
 
 	if (!before(start_seq, tp->undo_marker))
-		return 1;
+		return true;
 
 	/* Too old */
 	if (!after(end_seq, tp->undo_marker))
-		return 0;
+		return false;
 
 	/* Undo_marker boundary crossing (overestimates a lot). Known already:
 	 * start_seq < undo_marker and end_seq >= undo_marker.
@@ -1224,17 +1225,17 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 		tp->lost_retrans_low = new_low_seq;
 }
 
-static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
+static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 			    struct tcp_sack_block_wire *sp, int num_sacks,
 			    u32 prior_snd_una)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
 	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
-	int dup_sack = 0;
+	bool dup_sack = false;
 
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
-		dup_sack = 1;
+		dup_sack = true;
 		tcp_dsack_seen(tp);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
@@ -1243,7 +1244,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 
 		if (!after(end_seq_0, end_seq_1) &&
 		    !before(start_seq_0, start_seq_1)) {
-			dup_sack = 1;
+			dup_sack = true;
 			tcp_dsack_seen(tp);
 			NET_INC_STATS_BH(sock_net(sk),
 					 LINUX_MIB_TCPDSACKOFORECV);
@@ -1274,9 +1275,10 @@ struct tcp_sacktag_state {
  * FIXME: this could be merged to shift decision code
  */
 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 				 u32 start_seq, u32 end_seq)
 {
-	int in_sack, err;
+	int err;
+	bool in_sack;
 	unsigned int pkt_len;
 	unsigned int mss;
 
@@ -1322,7 +1324,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 static u8 tcp_sacktag_one(struct sock *sk,
 			  struct tcp_sacktag_state *state, u8 sacked,
 			  u32 start_seq, u32 end_seq,
-			  int dup_sack, int pcount)
+			  bool dup_sack, int pcount)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fack_count = state->fack_count;
@@ -1402,10 +1404,10 @@ static u8 tcp_sacktag_one(struct sock *sk,
 /* Shift newly-SACKed bytes from this skb to the immediately previous
  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
  */
-static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			    struct tcp_sacktag_state *state,
 			    unsigned int pcount, int shifted, int mss,
-			    int dup_sack)
+			    bool dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1455,7 +1457,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	if (skb->len > 0) {
 		BUG_ON(!tcp_skb_pcount(skb));
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
-		return 0;
+		return false;
 	}
 
 	/* Whole SKB was eaten :-) */
@@ -1478,7 +1480,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
 
-	return 1;
+	return true;
 }
 
 /* I wish gso_size would have a bit more sane initialization than
@@ -1501,7 +1503,7 @@ static int skb_can_shift(const struct sk_buff *skb)
 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 					  struct tcp_sacktag_state *state,
 					  u32 start_seq, u32 end_seq,
-					  int dup_sack)
+					  bool dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev;
@@ -1640,14 +1642,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 					struct tcp_sack_block *next_dup,
 					struct tcp_sacktag_state *state,
 					u32 start_seq, u32 end_seq,
-					int dup_sack_in)
+					bool dup_sack_in)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *tmp;
 
 	tcp_for_write_queue_from(skb, sk) {
 		int in_sack = 0;
-		int dup_sack = dup_sack_in;
+		bool dup_sack = dup_sack_in;
 
 		if (skb == tcp_send_head(sk))
 			break;
@@ -1662,7 +1664,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 						next_dup->start_seq,
 						next_dup->end_seq);
 			if (in_sack > 0)
-				dup_sack = 1;
+				dup_sack = true;
 		}
 
 		/* skb reference here is a bit tricky to get right, since
@@ -1767,7 +1769,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 	struct sk_buff *skb;
 	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
 	int used_sacks;
-	int found_dup_sack = 0;
+	bool found_dup_sack = false;
 	int i, j;
 	int first_sack_index;
 
@@ -1798,7 +1800,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 	used_sacks = 0;
 	first_sack_index = 0;
 	for (i = 0; i < num_sacks; i++) {
-		int dup_sack = !i && found_dup_sack;
+		bool dup_sack = !i && found_dup_sack;
 
 		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
 		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
@@ -1865,7 +1867,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 	while (i < used_sacks) {
 		u32 start_seq = sp[i].start_seq;
 		u32 end_seq = sp[i].end_seq;
-		int dup_sack = (found_dup_sack && (i == first_sack_index));
+		bool dup_sack = (found_dup_sack && (i == first_sack_index));
 		struct tcp_sack_block *next_dup = NULL;
 
 		if (found_dup_sack && ((i + 1) == first_sack_index))
@@ -1967,9 +1969,9 @@ out:
 }
 
 /* Limits sacked_out so that sum with lost_out isn't ever larger than
- * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
+ * packets_out. Returns false if sacked_out adjustement wasn't necessary.
  */
-static int tcp_limit_reno_sacked(struct tcp_sock *tp)
+static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
 {
 	u32 holes;
 
@@ -1978,9 +1980,9 @@ static int tcp_limit_reno_sacked(struct tcp_sock *tp)
 
 	if ((tp->sacked_out + holes) > tp->packets_out) {
 		tp->sacked_out = tp->packets_out - holes;
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /* If we receive more dupacks than we expected counting segments
@@ -2034,40 +2036,40 @@ static int tcp_is_sackfrto(const struct tcp_sock *tp)
 /* F-RTO can only be used if TCP has never retransmitted anything other than
  * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
  */
-int tcp_use_frto(struct sock *sk)
+bool tcp_use_frto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 
 	if (!sysctl_tcp_frto)
-		return 0;
+		return false;
 
 	/* MTU probe and F-RTO won't really play nicely along currently */
 	if (icsk->icsk_mtup.probe_size)
-		return 0;
+		return false;
 
 	if (tcp_is_sackfrto(tp))
-		return 1;
+		return true;
 
 	/* Avoid expensive walking of rexmit queue if possible */
 	if (tp->retrans_out > 1)
-		return 0;
+		return false;
 
 	skb = tcp_write_queue_head(sk);
 	if (tcp_skb_is_last(sk, skb))
-		return 1;
+		return true;
 	skb = tcp_write_queue_next(sk, skb); /* Skips head */
 	tcp_for_write_queue_from(skb, sk) {
 		if (skb == tcp_send_head(sk))
 			break;
 		if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
-			return 0;
+			return false;
 		/* Short-circuit when first non-SACKed skb has been checked */
 		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 			break;
 	}
-	return 1;
+	return true;
 }
 
@@ -2303,7 +2305,7 @@ void tcp_enter_loss(struct sock *sk, int how)
  *
  * Do processing similar to RTO timeout.
  */
-static int tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int flag)
 {
 	if (flag & FLAG_SACK_RENEGING) {
 		struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2314,9 +2316,9 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 					  icsk->icsk_rto, TCP_RTO_MAX);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static inline int tcp_fackets_out(const struct tcp_sock *tp)
@@ -2472,28 +2474,28 @@ static inline int tcp_head_timedout(const struct sock *sk)
  * Main question: may we further continue forward transmission
  * with the same cwnd?
  */
-static int tcp_time_to_recover(struct sock *sk, int flag)
+static bool tcp_time_to_recover(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
 
 	/* Do not perform any recovery during F-RTO algorithm */
 	if (tp->frto_counter)
-		return 0;
+		return false;
 
 	/* Trick#1: The loss is proven. */
 	if (tp->lost_out)
-		return 1;
+		return true;
 
 	/* Not-A-Trick#2 : Classic rule... */
 	if (tcp_dupack_heuristics(tp) > tp->reordering)
-		return 1;
+		return true;
 
 	/* Trick#3 : when we use RFC2988 timer restart, fast
 	 * retransmit can be triggered by timeout of queue head.
 	 */
 	if (tcp_is_fack(tp) && tcp_head_timedout(sk))
-		return 1;
+		return true;
 
 	/* Trick#4: It is still not OK... But will it be useful to delay
 	 * recovery more?
@@ -2505,7 +2507,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag)
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
 		 */
-		return 1;
+		return true;
 	}
 
 	/* If a thin stream is detected, retransmit after first
@@ -2516,7 +2518,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag)
 	if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
 	    tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
 	    tcp_is_sack(tp) && !tcp_send_head(sk))
-		return 1;
+		return true;
 
 	/* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
 	 * retransmissions due to small network reorderings, we implement
@@ -2528,7 +2530,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag)
 	    !tcp_may_send_now(sk))
 		return !tcp_pause_early_retransmit(sk, flag);
 
-	return 0;
+	return false;
 }
 
 /* New heuristics: it is possible only after we switched to restart timer
@@ -2767,7 +2769,7 @@ static inline int tcp_may_undo(const struct tcp_sock *tp)
 }
 
 /* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk)
+static bool tcp_try_undo_recovery(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2792,10 +2794,10 @@ static int tcp_try_undo_recovery(struct sock *sk)
 		 * is ACKed. For Reno it is MUST to prevent false
 		 * fast retransmits (RFC2582). SACK TCP is safe. */
 		tcp_moderate_cwnd(tp);
-		return 1;
+		return true;
 	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
-	return 0;
+	return false;
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
@@ -2825,19 +2827,19 @@ static void tcp_try_undo_dsack(struct sock *sk)
  * that successive retransmissions of a segment must not advance
  * retrans_stamp under any conditions.
  */
-static int tcp_any_retrans_done(const struct sock *sk)
+static bool tcp_any_retrans_done(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
 	if (tp->retrans_out)
-		return 1;
+		return true;
 
 	skb = tcp_write_queue_head(sk);
 	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 /* Undo during fast recovery after partial ACK. */
@@ -2871,7 +2873,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 }
 
 /* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk)
+static bool tcp_try_undo_loss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2893,9 +2895,9 @@ static int tcp_try_undo_loss(struct sock *sk)
 		tp->undo_marker = 0;
 		if (tcp_is_sack(tp))
 			tcp_set_ca_state(sk, TCP_CA_Open);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static inline void tcp_complete_cwr(struct sock *sk)
@@ -3370,7 +3372,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 	u32 now = tcp_time_stamp;
-	int fully_acked = 1;
+	int fully_acked = true;
 	int flag = 0;
 	u32 pkts_acked = 0;
 	u32 reord = tp->packets_out;
@@ -3394,7 +3396,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			if (!acked_pcount)
 				break;
 
-			fully_acked = 0;
+			fully_acked = false;
 		} else {
 			acked_pcount = tcp_skb_pcount(skb);
 		}
@@ -3673,7 +3675,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
  * to prove that the RTO is indeed spurious. It transfers the control
  * from F-RTO to the conventional RTO recovery
  */
-static int tcp_process_frto(struct sock *sk, int flag)
+static bool tcp_process_frto(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -3689,7 +3691,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 
 	if (!before(tp->snd_una, tp->frto_highmark)) {
 		tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
-		return 1;
+		return true;
 	}
 
 	if (!tcp_is_sackfrto(tp)) {
@@ -3698,19 +3700,19 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		 * data, winupdate
 		 */
 		if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
-			return 1;
+			return true;
 
 		if (!(flag & FLAG_DATA_ACKED)) {
 			tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
 					    flag);
-			return 1;
+			return true;
 		}
 	} else {
 		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
 			/* Prevent sending of new data. */
 			tp->snd_cwnd = min(tp->snd_cwnd,
 					   tcp_packets_in_flight(tp));
-			return 1;
+			return true;
 		}
 
 		if ((tp->frto_counter >= 2) &&
@@ -3720,10 +3722,10 @@ static int tcp_process_frto(struct sock *sk, int flag)
 			/* RFC4138 shortcoming (see comment above) */
 			if (!(flag & FLAG_FORWARD_PROGRESS) &&
 			    (flag & FLAG_NOT_DUP))
-				return 1;
+				return true;
 
 			tcp_enter_frto_loss(sk, 3, flag);
-			return 1;
+			return true;
 		}
 	}
 
@@ -3735,7 +3737,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		if (!tcp_may_send_now(sk))
 			tcp_enter_frto_loss(sk, 2, flag);
 
-		return 1;
+		return true;
 	} else {
 		switch (sysctl_tcp_frto_response) {
 		case 2:
@@ -3752,7 +3754,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
 	}
-	return 0;
+	return false;
 }
 
 /* This routine deals with incoming acks, but not outgoing ones. */
@@ -3770,7 +3772,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int prior_sacked = tp->sacked_out;
 	int pkts_acked = 0;
 	int newly_acked_sacked = 0;
-	int frto_cwnd = 0;
+	bool frto_cwnd = false;
 
 	/* If the ack is older than previous acks
 	 * then we can probably ignore it.
@@ -4025,7 +4027,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
 }
 EXPORT_SYMBOL(tcp_parse_options);
 
-static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
+static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	const __be32 *ptr = (const __be32 *)(th + 1);
 
@@ -4036,31 +4038,31 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
 		tp->rx_opt.rcv_tsval = ntohl(*ptr);
 		++ptr;
 		tp->rx_opt.rcv_tsecr = ntohl(*ptr);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
-static int tcp_fast_parse_options(const struct sk_buff *skb,
+static bool tcp_fast_parse_options(const struct sk_buff *skb,
 				   const struct tcphdr *th,
 				   struct tcp_sock *tp, const u8 **hvpp)
 {
 	/* In the spirit of fast parsing, compare doff directly to constant
 	 * values. Because equality is used, short doff can be ignored here.
 	 */
 	if (th->doff == (sizeof(*th) / 4)) {
 		tp->rx_opt.saw_tstamp = 0;
-		return 0;
+		return false;
 	} else if (tp->rx_opt.tstamp_ok &&
 		   th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
 		if (tcp_parse_aligned_timestamp(tp, th))
-			return 1;
+			return true;
 	}
 	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
-	return 1;
+	return true;
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -4301,7 +4303,7 @@ static void tcp_fin(struct sock *sk)
 	}
 }
 
-static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
+static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 				  u32 end_seq)
 {
 	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
@@ -4309,9 +4311,9 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 			sp->start_seq = seq;
 		if (after(end_seq, sp->end_seq))
 			sp->end_seq = end_seq;
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
@@ -4507,7 +4509,7 @@ static void tcp_ofo_queue(struct sock *sk)
 	}
 }
 
-static int tcp_prune_ofo_queue(struct sock *sk);
+static bool tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
 static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
@@ -5092,10 +5094,10 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
  * Purge the out-of-order queue.
  * Return true if queue was pruned.
  */
-static int tcp_prune_ofo_queue(struct sock *sk)
+static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int res = 0;
+	bool res = false;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
@@ -5109,7 +5111,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
 		if (tp->rx_opt.sack_ok)
 			tcp_sack_reset(&tp->rx_opt);
 		sk_mem_reclaim(sk);
-		res = 1;
+		res = true;
 	}
 	return res;
 }
@@ -5186,7 +5188,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(const struct sock *sk)
+static bool tcp_should_expand_sndbuf(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -5194,21 +5196,21 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
 	 * not modify it.
 	 */
 	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
-		return 0;
+		return false;
 
 	/* If we are under global TCP memory pressure, do not expand. */
 	if (sk_under_memory_pressure(sk))
-		return 0;
+		return false;
 
 	/* If we are under soft global TCP memory pressure, do not expand. */
 	if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
-		return 0;
+		return false;
 
 	/* If we filled the congestion window, do not expand. */
 	if (tp->packets_out >= tp->snd_cwnd)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /* When incoming ACK allowed to free some skb from write_queue,
@@ -5434,16 +5436,16 @@ static inline int tcp_checksum_complete_user(struct sock *sk,
 }
 
 #ifdef CONFIG_NET_DMA
-static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
+static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 				   int hlen)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int chunk = skb->len - hlen;
 	int dma_cookie;
-	int copied_early = 0;
+	bool copied_early = false;
 
 	if (tp->ucopy.wakeup)
-		return 0;
+		return false;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
 		tp->ucopy.dma_chan = net_dma_find_channel();
@@ -5459,7 +5461,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		goto out;
 
 	tp->ucopy.dma_cookie = dma_cookie;
-	copied_early = 1;
+	copied_early = true;
 
 	tp->ucopy.len -= chunk;
 	tp->copied_seq += chunk;