author	Eric Dumazet <edumazet@google.com>	2012-05-16 19:15:34 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-17 14:59:59 -0400
commit	a2a385d627e1549da4b43a8b3dfe370589766e1c (patch)
tree	d61e9913497c6c14406032f6a0822738707f1abf /net
parent	e005d193d55ee5f757b13306112d8c23aac27a88 (diff)
tcp: bool conversions
bool conversions where possible.

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
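The pattern applied throughout is mechanical: predicate-style helpers that returned 0 or 1 through an int now return false or true through bool, and flag-like locals and struct members follow suit. A minimal sketch of the before/after shape, using a hypothetical helper rather than one taken from this patch:

#include <stdbool.h>	/* userspace stand-in; kernel code gets bool via <linux/types.h> */

/* Before: the truth value is smuggled through an int. */
static inline int seq_in_window_old(unsigned int seq, unsigned int lo, unsigned int hi)
{
	if (seq >= lo && seq < hi)
		return 1;
	return 0;
}

/* After: the return type documents the predicate contract, and the
 * body can return the boolean expression directly. */
static inline bool seq_in_window(unsigned int seq, unsigned int lo, unsigned int hi)
{
	return seq >= lo && seq < hi;
}

Compilers typically generate identical code for both forms; the gain is in the signature, which now tells callers the result is a yes/no answer rather than a count or an error code. On common ABIs bool is one byte, so members such as hybla_en (previously u8) keep their size while the intent becomes explicit.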
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp.c           |  20
-rw-r--r--  net/ipv4/tcp_cong.c      |   6
-rw-r--r--  net/ipv4/tcp_hybla.c     |  10
-rw-r--r--  net/ipv4/tcp_input.c     | 214
-rw-r--r--  net/ipv4/tcp_ipv4.c      |  26
-rw-r--r--  net/ipv4/tcp_minisocks.c |  24
-rw-r--r--  net/ipv4/tcp_output.c    |  75
-rw-r--r--  net/ipv6/tcp_ipv6.c      |   4
8 files changed, 191 insertions(+), 188 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e8a80d0b5b3c..63ddaee7209f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -593,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 	tp->pushed_seq = tp->write_seq;
 }
 
-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
 {
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
@@ -1082,7 +1082,7 @@ new_segment:
 			if (err)
 				goto do_fault;
 		} else {
-			int merge = 0;
+			bool merge = false;
 			int i = skb_shinfo(skb)->nr_frags;
 			struct page *page = sk->sk_sndmsg_page;
 			int off;
@@ -1096,7 +1096,7 @@ new_segment:
 			    off != PAGE_SIZE) {
 				/* We can extend the last page
 				 * fragment. */
-				merge = 1;
+				merge = true;
 			} else if (i == MAX_SKB_FRAGS || !sg) {
 				/* Need to add new fragment and cannot
 				 * do this because interface is non-SG,
@@ -1293,7 +1293,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
 void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int time_to_ack = 0;
+	bool time_to_ack = false;
 
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
@@ -1319,7 +1319,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
 		       !icsk->icsk_ack.pingpong)) &&
 		      !atomic_read(&sk->sk_rmem_alloc)))
-			time_to_ack = 1;
+			time_to_ack = true;
 	}
 
 	/* We send an ACK if we can now advertise a non-zero window
@@ -1341,7 +1341,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1341 * "Lots" means "at least twice" here. 1341 * "Lots" means "at least twice" here.
1342 */ 1342 */
1343 if (new_window && new_window >= 2 * rcv_window_now) 1343 if (new_window && new_window >= 2 * rcv_window_now)
1344 time_to_ack = 1; 1344 time_to_ack = true;
1345 } 1345 }
1346 } 1346 }
1347 if (time_to_ack) 1347 if (time_to_ack)
@@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(tcp_close);
 
 /* These states need RST on ABORT according to RFC793 */
 
-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
 {
 	return (1 << state) &
 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
@@ -2245,7 +2245,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-static inline int tcp_can_repair_sock(struct sock *sk)
+static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return capable(CAP_NET_ADMIN) &&
 		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
@@ -3172,13 +3172,13 @@ out_free:
 struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	struct tcp_md5sig_pool __percpu *pool;
-	int alloc = 0;
+	bool alloc = false;
 
 retry:
 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
-		alloc = 1;
+		alloc = true;
 		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 272a84593c85..04dbd7ae7c62 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -280,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 /* RFC2861 Check whether we are limited by application or congestion window
  * This is the inverse of cwnd check in tcp_tso_should_defer
  */
-int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	u32 left;
 
 	if (in_flight >= tp->snd_cwnd)
-		return 1;
+		return true;
 
 	left = tp->snd_cwnd - in_flight;
 	if (sk_can_gso(sk) &&
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
 	    left * tp->mss_cache < sk->sk_gso_max_size)
-		return 1;
+		return true;
 	return left <= tcp_max_tso_deferred_mss(tp);
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index fe3ecf484b44..57bdd17dff4d 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -15,7 +15,7 @@
 
 /* Tcp Hybla structure. */
 struct hybla {
-	u8   hybla_en;
+	bool hybla_en;
 	u32  snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
 	u32  rho;	     /* Rho parameter, integer part */
 	u32  rho2;	     /* Rho * Rho, integer part */
@@ -24,8 +24,7 @@ struct hybla {
 	u32  minrtt;	     /* Minimum smoothed round trip time value seen */
 };
 
-/* Hybla reference round trip time (default= 1/40 sec = 25 ms),
-   expressed in jiffies */
+/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
 static int rtt0 = 25;
 module_param(rtt0, int, 0644);
 MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");
@@ -39,7 +38,7 @@ static inline void hybla_recalc_param (struct sock *sk)
 	ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
 	ca->rho = ca->rho_3ls >> 3;
 	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
-	ca->rho2 = ca->rho2_7ls >>7;
+	ca->rho2 = ca->rho2_7ls >> 7;
 }
 
 static void hybla_init(struct sock *sk)
@@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk)
 	ca->rho_3ls = 0;
 	ca->rho2_7ls = 0;
 	ca->snd_cwnd_cents = 0;
-	ca->hybla_en = 1;
+	ca->hybla_en = true;
 	tp->snd_cwnd = 2;
 	tp->snd_cwnd_clamp = 65535;
 
@@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk)
 static void hybla_state(struct sock *sk, u8 ca_state)
 {
 	struct hybla *ca = inet_csk_ca(sk);
+
 	ca->hybla_en = (ca_state == TCP_CA_Open);
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb97787be757..b961ef54b17d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -196,9 +196,10 @@ static void tcp_enter_quickack_mode(struct sock *sk)
  * and the session is not interactive.
  */
 
-static inline int tcp_in_quickack_mode(const struct sock *sk)
+static inline bool tcp_in_quickack_mode(const struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+
 	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
 }
 
@@ -253,11 +254,11 @@ static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
-static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 /* Buffer size and advertised window tuning.
@@ -1123,36 +1124,36 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
  * the exact amount is rather hard to quantify. However, tp->max_window can
  * be used as an exaggerated estimate.
  */
-static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
+static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
 				   u32 start_seq, u32 end_seq)
 {
 	/* Too far in future, or reversed (interpretation is ambiguous) */
 	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
-		return 0;
+		return false;
 
 	/* Nasty start_seq wrap-around check (see comments above) */
 	if (!before(start_seq, tp->snd_nxt))
-		return 0;
+		return false;
 
 	/* In outstanding window? ...This is valid exit for D-SACKs too.
 	 * start_seq == snd_una is non-sensical (see comments above)
 	 */
 	if (after(start_seq, tp->snd_una))
-		return 1;
+		return true;
 
 	if (!is_dsack || !tp->undo_marker)
-		return 0;
+		return false;
 
 	/* ...Then it's D-SACK, and must reside below snd_una completely */
 	if (after(end_seq, tp->snd_una))
-		return 0;
+		return false;
 
 	if (!before(start_seq, tp->undo_marker))
-		return 1;
+		return true;
 
 	/* Too old */
 	if (!after(end_seq, tp->undo_marker))
-		return 0;
+		return false;
 
 	/* Undo_marker boundary crossing (overestimates a lot). Known already:
 	 * start_seq < undo_marker and end_seq >= undo_marker.
@@ -1224,17 +1225,17 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 		tp->lost_retrans_low = new_low_seq;
 }
 
-static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
+static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 			    struct tcp_sack_block_wire *sp, int num_sacks,
 			    u32 prior_snd_una)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
 	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
-	int dup_sack = 0;
+	bool dup_sack = false;
 
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
-		dup_sack = 1;
+		dup_sack = true;
 		tcp_dsack_seen(tp);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
@@ -1243,7 +1244,7 @@ static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 
 		if (!after(end_seq_0, end_seq_1) &&
 		    !before(start_seq_0, start_seq_1)) {
-			dup_sack = 1;
+			dup_sack = true;
 			tcp_dsack_seen(tp);
 			NET_INC_STATS_BH(sock_net(sk),
 					LINUX_MIB_TCPDSACKOFORECV);
@@ -1274,9 +1275,10 @@ struct tcp_sacktag_state {
  * FIXME: this could be merged to shift decision code
  */
 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 				 u32 start_seq, u32 end_seq)
 {
-	int in_sack, err;
+	int err;
+	bool in_sack;
 	unsigned int pkt_len;
 	unsigned int mss;
 
@@ -1322,7 +1324,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 static u8 tcp_sacktag_one(struct sock *sk,
 			  struct tcp_sacktag_state *state, u8 sacked,
 			  u32 start_seq, u32 end_seq,
-			  int dup_sack, int pcount)
+			  bool dup_sack, int pcount)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fack_count = state->fack_count;
@@ -1402,10 +1404,10 @@ static u8 tcp_sacktag_one(struct sock *sk,
 /* Shift newly-SACKed bytes from this skb to the immediately previous
  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
  */
-static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			    struct tcp_sacktag_state *state,
 			    unsigned int pcount, int shifted, int mss,
-			    int dup_sack)
+			    bool dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1455,7 +1457,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	if (skb->len > 0) {
 		BUG_ON(!tcp_skb_pcount(skb));
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
-		return 0;
+		return false;
 	}
 
 	/* Whole SKB was eaten :-) */
@@ -1478,7 +1480,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
 
-	return 1;
+	return true;
 }
 
 /* I wish gso_size would have a bit more sane initialization than
@@ -1501,7 +1503,7 @@ static int skb_can_shift(const struct sk_buff *skb)
 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 					  struct tcp_sacktag_state *state,
 					  u32 start_seq, u32 end_seq,
-					  int dup_sack)
+					  bool dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev;
@@ -1640,14 +1642,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 					struct tcp_sack_block *next_dup,
 					struct tcp_sacktag_state *state,
 					u32 start_seq, u32 end_seq,
-					int dup_sack_in)
+					bool dup_sack_in)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *tmp;
 
 	tcp_for_write_queue_from(skb, sk) {
 		int in_sack = 0;
-		int dup_sack = dup_sack_in;
+		bool dup_sack = dup_sack_in;
 
 		if (skb == tcp_send_head(sk))
 			break;
@@ -1662,7 +1664,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 						next_dup->start_seq,
 						next_dup->end_seq);
 			if (in_sack > 0)
-				dup_sack = 1;
+				dup_sack = true;
 		}
 
 		/* skb reference here is a bit tricky to get right, since
@@ -1767,7 +1769,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 	struct sk_buff *skb;
 	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
 	int used_sacks;
-	int found_dup_sack = 0;
+	bool found_dup_sack = false;
 	int i, j;
 	int first_sack_index;
 
@@ -1798,7 +1800,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 	used_sacks = 0;
 	first_sack_index = 0;
 	for (i = 0; i < num_sacks; i++) {
-		int dup_sack = !i && found_dup_sack;
+		bool dup_sack = !i && found_dup_sack;
 
 		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
 		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
@@ -1865,7 +1867,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 	while (i < used_sacks) {
 		u32 start_seq = sp[i].start_seq;
 		u32 end_seq = sp[i].end_seq;
-		int dup_sack = (found_dup_sack && (i == first_sack_index));
+		bool dup_sack = (found_dup_sack && (i == first_sack_index));
 		struct tcp_sack_block *next_dup = NULL;
 
 		if (found_dup_sack && ((i + 1) == first_sack_index))
@@ -1967,9 +1969,9 @@ out:
 }
 
 /* Limits sacked_out so that sum with lost_out isn't ever larger than
- * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
+ * packets_out. Returns false if sacked_out adjustement wasn't necessary.
  */
-static int tcp_limit_reno_sacked(struct tcp_sock *tp)
+static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
 {
 	u32 holes;
 
@@ -1978,9 +1980,9 @@ static int tcp_limit_reno_sacked(struct tcp_sock *tp)
 
 	if ((tp->sacked_out + holes) > tp->packets_out) {
 		tp->sacked_out = tp->packets_out - holes;
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /* If we receive more dupacks than we expected counting segments
@@ -2034,40 +2036,40 @@ static int tcp_is_sackfrto(const struct tcp_sock *tp)
 /* F-RTO can only be used if TCP has never retransmitted anything other than
  * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
  */
-int tcp_use_frto(struct sock *sk)
+bool tcp_use_frto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 
 	if (!sysctl_tcp_frto)
-		return 0;
+		return false;
 
 	/* MTU probe and F-RTO won't really play nicely along currently */
 	if (icsk->icsk_mtup.probe_size)
-		return 0;
+		return false;
 
 	if (tcp_is_sackfrto(tp))
-		return 1;
+		return true;
 
 	/* Avoid expensive walking of rexmit queue if possible */
 	if (tp->retrans_out > 1)
-		return 0;
+		return false;
 
 	skb = tcp_write_queue_head(sk);
 	if (tcp_skb_is_last(sk, skb))
-		return 1;
+		return true;
 	skb = tcp_write_queue_next(sk, skb);	/* Skips head */
 	tcp_for_write_queue_from(skb, sk) {
 		if (skb == tcp_send_head(sk))
 			break;
 		if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
-			return 0;
+			return false;
 		/* Short-circuit when first non-SACKed skb has been checked */
 		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 			break;
 	}
-	return 1;
+	return true;
 }
 
 /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
@@ -2303,7 +2305,7 @@ void tcp_enter_loss(struct sock *sk, int how)
  *
  * Do processing similar to RTO timeout.
  */
-static int tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int flag)
 {
 	if (flag & FLAG_SACK_RENEGING) {
 		struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2314,9 +2316,9 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 					  icsk->icsk_rto, TCP_RTO_MAX);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static inline int tcp_fackets_out(const struct tcp_sock *tp)
@@ -2472,28 +2474,28 @@ static inline int tcp_head_timedout(const struct sock *sk)
  * Main question: may we further continue forward transmission
  * with the same cwnd?
  */
-static int tcp_time_to_recover(struct sock *sk, int flag)
+static bool tcp_time_to_recover(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
 
 	/* Do not perform any recovery during F-RTO algorithm */
 	if (tp->frto_counter)
-		return 0;
+		return false;
 
 	/* Trick#1: The loss is proven. */
 	if (tp->lost_out)
-		return 1;
+		return true;
 
 	/* Not-A-Trick#2 : Classic rule... */
 	if (tcp_dupack_heuristics(tp) > tp->reordering)
-		return 1;
+		return true;
 
 	/* Trick#3 : when we use RFC2988 timer restart, fast
 	 * retransmit can be triggered by timeout of queue head.
 	 */
 	if (tcp_is_fack(tp) && tcp_head_timedout(sk))
-		return 1;
+		return true;
 
 	/* Trick#4: It is still not OK... But will it be useful to delay
 	 * recovery more?
@@ -2505,7 +2507,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag)
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
 		 */
-		return 1;
+		return true;
 	}
 
 	/* If a thin stream is detected, retransmit after first
@@ -2516,7 +2518,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag)
 	if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
 	    tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
 	    tcp_is_sack(tp) && !tcp_send_head(sk))
-		return 1;
+		return true;
 
 	/* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
 	 * retransmissions due to small network reorderings, we implement
@@ -2528,7 +2530,7 @@ static int tcp_time_to_recover(struct sock *sk, int flag)
 	    !tcp_may_send_now(sk))
 		return !tcp_pause_early_retransmit(sk, flag);
 
-	return 0;
+	return false;
 }
 
 /* New heuristics: it is possible only after we switched to restart timer
@@ -2767,7 +2769,7 @@ static inline int tcp_may_undo(const struct tcp_sock *tp)
 }
 
 /* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk)
+static bool tcp_try_undo_recovery(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2792,10 +2794,10 @@ static int tcp_try_undo_recovery(struct sock *sk)
 		 * is ACKed. For Reno it is MUST to prevent false
 		 * fast retransmits (RFC2582). SACK TCP is safe. */
 		tcp_moderate_cwnd(tp);
-		return 1;
+		return true;
 	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
-	return 0;
+	return false;
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
@@ -2825,19 +2827,19 @@ static void tcp_try_undo_dsack(struct sock *sk)
  * that successive retransmissions of a segment must not advance
  * retrans_stamp under any conditions.
  */
-static int tcp_any_retrans_done(const struct sock *sk)
+static bool tcp_any_retrans_done(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
 	if (tp->retrans_out)
-		return 1;
+		return true;
 
 	skb = tcp_write_queue_head(sk);
 	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 /* Undo during fast recovery after partial ACK. */
@@ -2871,7 +2873,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 }
 
 /* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk)
+static bool tcp_try_undo_loss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2893,9 +2895,9 @@ static int tcp_try_undo_loss(struct sock *sk)
 		tp->undo_marker = 0;
 		if (tcp_is_sack(tp))
 			tcp_set_ca_state(sk, TCP_CA_Open);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static inline void tcp_complete_cwr(struct sock *sk)
@@ -3370,7 +3372,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 	u32 now = tcp_time_stamp;
-	int fully_acked = 1;
+	bool fully_acked = true;
 	int flag = 0;
 	u32 pkts_acked = 0;
 	u32 reord = tp->packets_out;
@@ -3394,7 +3396,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			if (!acked_pcount)
 				break;
 
-			fully_acked = 0;
+			fully_acked = false;
 		} else {
 			acked_pcount = tcp_skb_pcount(skb);
 		}
@@ -3673,7 +3675,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
  * to prove that the RTO is indeed spurious. It transfers the control
  * from F-RTO to the conventional RTO recovery
  */
-static int tcp_process_frto(struct sock *sk, int flag)
+static bool tcp_process_frto(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -3689,7 +3691,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 
 	if (!before(tp->snd_una, tp->frto_highmark)) {
 		tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
-		return 1;
+		return true;
 	}
 
 	if (!tcp_is_sackfrto(tp)) {
@@ -3698,19 +3700,19 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		 * data, winupdate
 		 */
 		if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
-			return 1;
+			return true;
 
 		if (!(flag & FLAG_DATA_ACKED)) {
 			tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
 					    flag);
-			return 1;
+			return true;
 		}
 	} else {
 		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
 			/* Prevent sending of new data. */
 			tp->snd_cwnd = min(tp->snd_cwnd,
 					   tcp_packets_in_flight(tp));
-			return 1;
+			return true;
 		}
 
 		if ((tp->frto_counter >= 2) &&
@@ -3720,10 +3722,10 @@ static int tcp_process_frto(struct sock *sk, int flag)
 			/* RFC4138 shortcoming (see comment above) */
 			if (!(flag & FLAG_FORWARD_PROGRESS) &&
 			    (flag & FLAG_NOT_DUP))
-				return 1;
+				return true;
 
 			tcp_enter_frto_loss(sk, 3, flag);
-			return 1;
+			return true;
 		}
 	}
 
@@ -3735,7 +3737,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		if (!tcp_may_send_now(sk))
 			tcp_enter_frto_loss(sk, 2, flag);
 
-		return 1;
+		return true;
 	} else {
 		switch (sysctl_tcp_frto_response) {
 		case 2:
@@ -3752,7 +3754,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
 	}
-	return 0;
+	return false;
 }
 
 /* This routine deals with incoming acks, but not outgoing ones. */
@@ -3770,7 +3772,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int prior_sacked = tp->sacked_out;
 	int pkts_acked = 0;
 	int newly_acked_sacked = 0;
-	int frto_cwnd = 0;
+	bool frto_cwnd = false;
 
 	/* If the ack is older than previous acks
 	 * then we can probably ignore it.
@@ -4025,7 +4027,7 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
 }
 EXPORT_SYMBOL(tcp_parse_options);
 
-static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
+static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	const __be32 *ptr = (const __be32 *)(th + 1);
 
@@ -4036,31 +4038,31 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
 		tp->rx_opt.rcv_tsval = ntohl(*ptr);
 		++ptr;
 		tp->rx_opt.rcv_tsecr = ntohl(*ptr);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
-static int tcp_fast_parse_options(const struct sk_buff *skb,
+static bool tcp_fast_parse_options(const struct sk_buff *skb,
 				   const struct tcphdr *th,
 				   struct tcp_sock *tp, const u8 **hvpp)
 {
 	/* In the spirit of fast parsing, compare doff directly to constant
 	 * values. Because equality is used, short doff can be ignored here.
 	 */
 	if (th->doff == (sizeof(*th) / 4)) {
 		tp->rx_opt.saw_tstamp = 0;
-		return 0;
+		return false;
 	} else if (tp->rx_opt.tstamp_ok &&
 		   th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
 		if (tcp_parse_aligned_timestamp(tp, th))
-			return 1;
+			return true;
 	}
 	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
-	return 1;
+	return true;
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -4301,7 +4303,7 @@ static void tcp_fin(struct sock *sk)
 	}
 }
 
-static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
+static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 				   u32 end_seq)
 {
 	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
@@ -4309,9 +4311,9 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 			sp->start_seq = seq;
 		if (after(end_seq, sp->end_seq))
 			sp->end_seq = end_seq;
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
@@ -4507,7 +4509,7 @@ static void tcp_ofo_queue(struct sock *sk)
 	}
 }
 
-static int tcp_prune_ofo_queue(struct sock *sk);
+static bool tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
 static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
@@ -5092,10 +5094,10 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
  * Purge the out-of-order queue.
  * Return true if queue was pruned.
  */
-static int tcp_prune_ofo_queue(struct sock *sk)
+static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int res = 0;
+	bool res = false;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
@@ -5109,7 +5111,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
 		if (tp->rx_opt.sack_ok)
 			tcp_sack_reset(&tp->rx_opt);
 		sk_mem_reclaim(sk);
-		res = 1;
+		res = true;
 	}
 	return res;
 }
@@ -5186,7 +5188,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(const struct sock *sk)
+static bool tcp_should_expand_sndbuf(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -5194,21 +5196,21 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
 	 * not modify it.
 	 */
 	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
-		return 0;
+		return false;
 
 	/* If we are under global TCP memory pressure, do not expand. */
 	if (sk_under_memory_pressure(sk))
-		return 0;
+		return false;
 
 	/* If we are under soft global TCP memory pressure, do not expand. */
 	if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
-		return 0;
+		return false;
 
 	/* If we filled the congestion window, do not expand. */
 	if (tp->packets_out >= tp->snd_cwnd)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /* When incoming ACK allowed to free some skb from write_queue,
@@ -5434,16 +5436,16 @@ static inline int tcp_checksum_complete_user(struct sock *sk,
 }
 
 #ifdef CONFIG_NET_DMA
-static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
+static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 				   int hlen)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int chunk = skb->len - hlen;
 	int dma_cookie;
-	int copied_early = 0;
+	bool copied_early = false;
 
 	if (tp->ucopy.wakeup)
-		return 0;
+		return false;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
 		tp->ucopy.dma_chan = net_dma_find_channel();
@@ -5459,7 +5461,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		goto out;
 
 	tp->ucopy.dma_cookie = dma_cookie;
-	copied_early = 1;
+	copied_early = true;
 
 	tp->ucopy.len -= chunk;
 	tp->copied_seq += chunk;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 2e76ffb66d7c..a43b87dfe800 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -866,14 +866,14 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 }
 
 /*
- * Return 1 if a syncookie should be sent
+ * Return true if a syncookie should be sent
  */
-int tcp_syn_flood_action(struct sock *sk,
+bool tcp_syn_flood_action(struct sock *sk,
 			 const struct sk_buff *skb,
 			 const char *proto)
 {
 	const char *msg = "Dropping request";
-	int want_cookie = 0;
+	bool want_cookie = false;
 	struct listen_sock *lopt;
 
 
@@ -881,7 +881,7 @@ int tcp_syn_flood_action(struct sock *sk,
 #ifdef CONFIG_SYN_COOKIES
 	if (sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
-		want_cookie = 1;
+		want_cookie = true;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
 	} else
 #endif
@@ -1196,7 +1196,7 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
 	/*
 	 * This gets called for each TCP segment that arrives
@@ -1219,16 +1219,16 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 
 	/* We've parsed the options - do we have a hash? */
 	if (!hash_expected && !hash_location)
-		return 0;
+		return false;
 
 	if (hash_expected && !hash_location) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
-		return 1;
+		return true;
 	}
 
 	if (!hash_expected && hash_location) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
-		return 1;
+		return true;
 	}
 
 	/* Okay, so this is hash_expected and hash_location -
@@ -1244,9 +1244,9 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 				     &iph->daddr, ntohs(th->dest),
 				     genhash ? " tcp_v4_calc_md5_hash failed"
 				     : "");
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 #endif
@@ -1280,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-	int want_cookie = 0;
+	bool want_cookie = false;
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1339,7 +1339,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			while (l-- > 0)
 				*c++ ^= *hash_location++;
 
-			want_cookie = 0;	/* not our kind of cookie */
+			want_cookie = false;	/* not our kind of cookie */
 			tmp_ext.cookie_out_never = 0; /* false */
 			tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 		} else if (!tp->rx_opt.cookie_in_always) {
@@ -2073,7 +2073,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
 	return rc;
 }
 
-static inline int empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(struct tcp_iter_state *st)
 {
 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
 		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6f6a91832826..b85d9fe7d663 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(tcp_death_row);
  * state.
  */
 
-static int tcp_remember_stamp(struct sock *sk)
+static bool tcp_remember_stamp(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -72,13 +72,13 @@ static int tcp_remember_stamp(struct sock *sk)
 		}
 		if (release_it)
 			inet_putpeer(peer);
-		return 1;
+		return true;
 	}
 
-	return 0;
+	return false;
 }
 
-static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
 {
 	struct sock *sk = (struct sock *) tw;
 	struct inet_peer *peer;
@@ -94,17 +94,17 @@ static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
 			peer->tcp_ts = tcptw->tw_ts_recent;
 		}
 		inet_putpeer(peer);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
-static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
+static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 {
 	if (seq == s_win)
-		return 1;
+		return true;
 	if (after(end_seq, s_win) && before(seq, e_win))
-		return 1;
+		return true;
 	return seq == e_win && seq == end_seq;
 }
 
@@ -143,7 +143,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 	struct tcp_options_received tmp_opt;
 	const u8 *hash_location;
 	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
-	int paws_reject = 0;
+	bool paws_reject = false;
 
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 	struct inet_timewait_sock *tw = NULL;
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
-	int recycle_ok = 0;
+	bool recycle_ok = false;
 
 	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
 		recycle_ok = tcp_remember_stamp(sk);
@@ -575,7 +575,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	struct sock *child;
 	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
-	int paws_reject = 0;
+	bool paws_reject = false;
 
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(struct tcphdr)>>2)) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1a630825c45b..803cbfe82fbc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -370,7 +370,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 	TCP_SKB_CB(skb)->end_seq = seq;
 }
 
-static inline int tcp_urg_mode(const struct tcp_sock *tp)
+static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 {
 	return tp->snd_una != tp->snd_up;
 }
@@ -1391,20 +1391,20 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
 }
 
 /* Minshall's variant of the Nagle send check. */
-static inline int tcp_minshall_check(const struct tcp_sock *tp)
+static inline bool tcp_minshall_check(const struct tcp_sock *tp)
 {
 	return after(tp->snd_sml, tp->snd_una) &&
 		!after(tp->snd_sml, tp->snd_nxt);
 }
 
-/* Return 0, if packet can be sent now without violation Nagle's rules:
+/* Return false, if packet can be sent now without violation Nagle's rules:
  * 1. It is full sized.
  * 2. Or it contains FIN. (already checked by caller)
  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
  *    With Minshall's modification: all sent small packets are ACKed.
  */
-static inline int tcp_nagle_check(const struct tcp_sock *tp,
+static inline bool tcp_nagle_check(const struct tcp_sock *tp,
 				  const struct sk_buff *skb,
 				  unsigned int mss_now, int nonagle)
 {
@@ -1413,11 +1413,11 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
1413 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 1413 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1414} 1414}
1415 1415
1416/* Return non-zero if the Nagle test allows this packet to be 1416/* Return true if the Nagle test allows this packet to be
1417 * sent now. 1417 * sent now.
1418 */ 1418 */
1419static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1419static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1420 unsigned int cur_mss, int nonagle) 1420 unsigned int cur_mss, int nonagle)
1421{ 1421{
1422 /* Nagle rule does not apply to frames which sit in the middle of the 1422 /* Nagle rule does not apply to frames which sit in the middle of the
1423 * write_queue (they have no chance to get new data). 1423 * write_queue (they have no chance to get new data).
@@ -1426,24 +1426,25 @@ static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff
1426 * argument based upon the location of SKB in the send queue. 1426 * argument based upon the location of SKB in the send queue.
1427 */ 1427 */
1428 if (nonagle & TCP_NAGLE_PUSH) 1428 if (nonagle & TCP_NAGLE_PUSH)
1429 return 1; 1429 return true;
1430 1430
1431 /* Don't use the nagle rule for urgent data (or for the final FIN). 1431 /* Don't use the nagle rule for urgent data (or for the final FIN).
1432 * Nagle can be ignored during F-RTO too (see RFC4138). 1432 * Nagle can be ignored during F-RTO too (see RFC4138).
1433 */ 1433 */
1434 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || 1434 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
1435 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1435 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1436 return 1; 1436 return true;
1437 1437
1438 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1438 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1439 return 1; 1439 return true;
1440 1440
1441 return 0; 1441 return false;
1442} 1442}
1443 1443
1444/* Does at least the first segment of SKB fit into the send window? */ 1444/* Does at least the first segment of SKB fit into the send window? */
1445static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1445static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1446 unsigned int cur_mss) 1446 const struct sk_buff *skb,
1447 unsigned int cur_mss)
1447{ 1448{
1448 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1449 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1449 1450
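
The tcp_minshall_check() conversion earlier in this hunk implements Minshall's refinement to Nagle: hold back a new small packet only while a previously sent small packet is still unacknowledged. A self-contained sketch of the same test, with a local stand-in for the kernel's after() macro (field names mirror the kernel's, but this is illustrative code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Wrap-safe "a comes after b" on 32-bit sequence numbers. */
    static bool seq_after(uint32_t a, uint32_t b)
    {
        return (int32_t)(b - a) < 0;
    }

    struct tcp_seq_state {
        uint32_t snd_sml; /* end of the last small packet sent */
        uint32_t snd_una; /* oldest unacknowledged byte */
        uint32_t snd_nxt; /* next byte to be sent */
    };

    /* true while a small packet is in flight: sent, not yet ACKed. */
    static bool minshall_small_pkt_in_flight(const struct tcp_seq_state *tp)
    {
        return seq_after(tp->snd_sml, tp->snd_una) &&
               !seq_after(tp->snd_sml, tp->snd_nxt);
    }
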
@@ -1476,7 +1477,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1476} 1477}
1477 1478
1478/* Test if sending is allowed right now. */ 1479/* Test if sending is allowed right now. */
1479int tcp_may_send_now(struct sock *sk) 1480bool tcp_may_send_now(struct sock *sk)
1480{ 1481{
1481 const struct tcp_sock *tp = tcp_sk(sk); 1482 const struct tcp_sock *tp = tcp_sk(sk);
1482 struct sk_buff *skb = tcp_send_head(sk); 1483 struct sk_buff *skb = tcp_send_head(sk);
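
tcp_may_send_now() composes several independent gates: Nagle/Minshall must permit the segment, congestion-window quota must remain, and the segment must fit the peer's advertised window. A simplified, self-contained sketch of that composition (the flattened state and the names are illustrative, not the kernel's tcp_snd_test()):

    #include <stdbool.h>
    #include <stdint.h>

    struct send_gate_state {
        bool     nagle_allows;      /* outcome of the Nagle test */
        uint32_t packets_in_flight;
        uint32_t cwnd;              /* congestion window, in packets */
        uint32_t end_seq;           /* end of the candidate segment */
        uint32_t window_end;        /* right edge of the offered window */
    };

    static bool may_send_now_sketch(const struct send_gate_state *s)
    {
        if (!s->nagle_allows)
            return false;
        if (s->packets_in_flight >= s->cwnd)
            return false;           /* no congestion quota left */
        /* wrap-safe: segment must end inside the offered window */
        return (int32_t)(s->end_seq - s->window_end) <= 0;
    }
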
@@ -1546,7 +1547,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1546 * 1547 *
1547 * This algorithm is from John Heffner. 1548 * This algorithm is from John Heffner.
1548 */ 1549 */
1549static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1550static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1550{ 1551{
1551 struct tcp_sock *tp = tcp_sk(sk); 1552 struct tcp_sock *tp = tcp_sk(sk);
1552 const struct inet_connection_sock *icsk = inet_csk(sk); 1553 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1606,11 +1607,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1606 /* Ok, it looks like it is advisable to defer. */ 1607 /* Ok, it looks like it is advisable to defer. */
1607 tp->tso_deferred = 1 | (jiffies << 1); 1608 tp->tso_deferred = 1 | (jiffies << 1);
1608 1609
1609 return 1; 1610 return true;
1610 1611
1611send_now: 1612send_now:
1612 tp->tso_deferred = 0; 1613 tp->tso_deferred = 0;
1613 return 0; 1614 return false;
1614} 1615}
1615 1616
1616/* Create a new MTU probe if we are ready. 1617/* Create a new MTU probe if we are ready.
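
The tp->tso_deferred assignment in the hunk above packs a flag and a timestamp into one word: shifting jiffies left by one and forcing bit 0 on guarantees a nonzero value, so zero can keep meaning "no deferral in progress". A sketch of the encoding (illustrative only):

    #include <stdint.h>

    static uint32_t defer_stamp_pack(uint32_t jiffies_now)
    {
        return 1u | (jiffies_now << 1);  /* never zero */
    }

    static uint32_t defer_stamp_unpack(uint32_t stored)
    {
        return stored >> 1;  /* recovers the low 31 bits of jiffies */
    }
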
@@ -1752,11 +1753,11 @@ static int tcp_mtu_probe(struct sock *sk)
1752 * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1753 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1753 * account rare use of URG, this is not a big flaw. 1754 * account rare use of URG, this is not a big flaw.
1754 * 1755 *
1755 * Returns 1, if no segments are in flight and we have queued segments, but 1756 * Returns true, if no segments are in flight and we have queued segments,
1756 * cannot send anything now because of SWS or another problem. 1757 * but cannot send anything now because of SWS or another problem.
1757 */ 1758 */
1758static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 1759static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1759 int push_one, gfp_t gfp) 1760 int push_one, gfp_t gfp)
1760{ 1761{
1761 struct tcp_sock *tp = tcp_sk(sk); 1762 struct tcp_sock *tp = tcp_sk(sk);
1762 struct sk_buff *skb; 1763 struct sk_buff *skb;
@@ -1770,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1770 /* Do MTU probing. */ 1771 /* Do MTU probing. */
1771 result = tcp_mtu_probe(sk); 1772 result = tcp_mtu_probe(sk);
1772 if (!result) { 1773 if (!result) {
1773 return 0; 1774 return false;
1774 } else if (result > 0) { 1775 } else if (result > 0) {
1775 sent_pkts = 1; 1776 sent_pkts = 1;
1776 } 1777 }
@@ -1829,7 +1830,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1829 1830
1830 if (likely(sent_pkts)) { 1831 if (likely(sent_pkts)) {
1831 tcp_cwnd_validate(sk); 1832 tcp_cwnd_validate(sk);
1832 return 0; 1833 return false;
1833 } 1834 }
1834 return !tp->packets_out && tcp_send_head(sk); 1835 return !tp->packets_out && tcp_send_head(sk);
1835} 1836}
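
The bool now returned by tcp_write_xmit() answers one narrow question for its caller: are segments queued that could not be sent? In this era of the code, __tcp_push_pending_frames() uses a true result to arm the zero-window probe timer via tcp_check_probe_timer(). A sketch of that caller pattern, with hypothetical stand-ins for the kernel functions (the real ones take a socket and transmit parameters):

    #include <stdbool.h>

    extern bool write_xmit_blocked(void);          /* assumed stand-in */
    extern void arm_zero_window_probe_timer(void); /* assumed stand-in */

    static void push_pending_sketch(void)
    {
        if (write_xmit_blocked())   /* queued but unsendable */
            arm_zero_window_probe_timer();
    }
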
@@ -2028,22 +2029,22 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2028} 2029}
2029 2030
2030/* Check if coalescing SKBs is legal. */ 2031/* Check if coalescing SKBs is legal. */
2031static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) 2032static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
2032{ 2033{
2033 if (tcp_skb_pcount(skb) > 1) 2034 if (tcp_skb_pcount(skb) > 1)
2034 return 0; 2035 return false;
2035 /* TODO: SACK collapsing could be used to remove this condition */ 2036 /* TODO: SACK collapsing could be used to remove this condition */
2036 if (skb_shinfo(skb)->nr_frags != 0) 2037 if (skb_shinfo(skb)->nr_frags != 0)
2037 return 0; 2038 return false;
2038 if (skb_cloned(skb)) 2039 if (skb_cloned(skb))
2039 return 0; 2040 return false;
2040 if (skb == tcp_send_head(sk)) 2041 if (skb == tcp_send_head(sk))
2041 return 0; 2042 return false;
2042 /* Some heuristics for collapsing over SACK'd could be invented */ 2043 /* Some heuristics for collapsing over SACK'd could be invented */
2043 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 2044 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2044 return 0; 2045 return false;
2045 2046
2046 return 1; 2047 return true;
2047} 2048}
2048 2049
2049/* Collapse packets in the retransmit queue to create 2050/* Collapse packets in the retransmit queue to create
@@ -2054,7 +2055,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2054{ 2055{
2055 struct tcp_sock *tp = tcp_sk(sk); 2056 struct tcp_sock *tp = tcp_sk(sk);
2056 struct sk_buff *skb = to, *tmp; 2057 struct sk_buff *skb = to, *tmp;
2057 int first = 1; 2058 bool first = true;
2058 2059
2059 if (!sysctl_tcp_retrans_collapse) 2060 if (!sysctl_tcp_retrans_collapse)
2060 return; 2061 return;
@@ -2068,7 +2069,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2068 space -= skb->len; 2069 space -= skb->len;
2069 2070
2070 if (first) { 2071 if (first) {
2071 first = 0; 2072 first = false;
2072 continue; 2073 continue;
2073 } 2074 }
2074 2075
@@ -2208,18 +2209,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2208/* Check whether forward retransmits are possible in the current 2209/* Check whether forward retransmits are possible in the current
2209 * window/congestion state. 2210 * window/congestion state.
2210 */ 2211 */
2211static int tcp_can_forward_retransmit(struct sock *sk) 2212static bool tcp_can_forward_retransmit(struct sock *sk)
2212{ 2213{
2213 const struct inet_connection_sock *icsk = inet_csk(sk); 2214 const struct inet_connection_sock *icsk = inet_csk(sk);
2214 const struct tcp_sock *tp = tcp_sk(sk); 2215 const struct tcp_sock *tp = tcp_sk(sk);
2215 2216
2216 /* Forward retransmissions are possible only during Recovery. */ 2217 /* Forward retransmissions are possible only during Recovery. */
2217 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2218 if (icsk->icsk_ca_state != TCP_CA_Recovery)
2218 return 0; 2219 return false;
2219 2220
2220 /* No forward retransmissions in Reno are possible. */ 2221 /* No forward retransmissions in Reno are possible. */
2221 if (tcp_is_reno(tp)) 2222 if (tcp_is_reno(tp))
2222 return 0; 2223 return false;
2223 2224
2224 /* Yeah, we have to make a difficult choice between forward transmission 2225 /* Yeah, we have to make a difficult choice between forward transmission
2225 * and retransmission... Both ways have their merits... 2226 * and retransmission... Both ways have their merits...
@@ -2230,9 +2231,9 @@ static int tcp_can_forward_retransmit(struct sock *sk)
2230 */ 2231 */
2231 2232
2232 if (tcp_may_send_now(sk)) 2233 if (tcp_may_send_now(sk))
2233 return 0; 2234 return false;
2234 2235
2235 return 1; 2236 return true;
2236} 2237}
2237 2238
2238/* This gets called after a retransmit timeout, and the initially 2239/* This gets called after a retransmit timeout, and the initially
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4cf55ae7bf80..554d5999abc4 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1055,7 +1055,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1055 struct tcp_sock *tp = tcp_sk(sk); 1055 struct tcp_sock *tp = tcp_sk(sk);
1056 __u32 isn = TCP_SKB_CB(skb)->when; 1056 __u32 isn = TCP_SKB_CB(skb)->when;
1057 struct dst_entry *dst = NULL; 1057 struct dst_entry *dst = NULL;
1058 int want_cookie = 0; 1058 bool want_cookie = false;
1059 1059
1060 if (skb->protocol == htons(ETH_P_IP)) 1060 if (skb->protocol == htons(ETH_P_IP))
1061 return tcp_v4_conn_request(sk, skb); 1061 return tcp_v4_conn_request(sk, skb);
@@ -1116,7 +1116,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1116 while (l-- > 0) 1116 while (l-- > 0)
1117 *c++ ^= *hash_location++; 1117 *c++ ^= *hash_location++;
1118 1118
1119 want_cookie = 0; /* not our kind of cookie */ 1119 want_cookie = false; /* not our kind of cookie */
1120 tmp_ext.cookie_out_never = 0; /* false */ 1120 tmp_ext.cookie_out_never = 0; /* false */
1121 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1121 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1122 } else if (!tp->rx_opt.cookie_in_always) { 1122 } else if (!tp->rx_opt.cookie_in_always) {
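
The while loop in this final hunk XOR-folds a computed hash back into the caller's cookie buffer in place. Stripped of the surrounding option parsing, the operation reduces to the following sketch (illustrative names, not the kernel's):

    #include <stddef.h>
    #include <stdint.h>

    static void xor_fold(uint8_t *cookie, const uint8_t *hash, size_t len)
    {
        while (len-- > 0)
            *cookie++ ^= *hash++;
    }
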