author	Eric Dumazet <edumazet@google.com>	2012-05-16 19:15:34 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-17 14:59:59 -0400
commit	a2a385d627e1549da4b43a8b3dfe370589766e1c (patch)
tree	d61e9913497c6c14406032f6a0822738707f1abf /net/ipv4/tcp.c
parent	e005d193d55ee5f757b13306112d8c23aac27a88 (diff)
tcp: bool conversions
bool conversions where possible.

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
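The conversion is mechanical: an int that only ever carries a truth value becomes a bool, and the literals 0/1 become false/true. Below is a minimal standalone sketch of the pattern, using a hypothetical helper loosely modeled on the merge flag in tcp_sendmsg(); names and values are illustrative, not code from this patch.

#include <stdbool.h>
#include <stdio.h>

/* An int used only as a flag becomes a bool, so the intent is
 * visible in the type and the compiler normalizes it to 0 or 1. */
static bool can_extend_fragment(int nr_frags, int max_frags, bool sg)
{
	bool merge = false;			/* was: int merge = 0; */

	if (sg && nr_frags < max_frags)
		merge = true;			/* was: merge = 1; */
	return merge;
}

int main(void)
{
	printf("%d\n", can_extend_fragment(3, 17, true));	/* prints 1 */
	printf("%d\n", can_extend_fragment(17, 17, true));	/* prints 0 */
	return 0;
}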
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e8a80d0b5b3c..63ddaee7209f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -593,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 	tp->pushed_seq = tp->write_seq;
 }
 
-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
 {
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
@@ -1082,7 +1082,7 @@ new_segment:
 			if (err)
 				goto do_fault;
 		} else {
-			int merge = 0;
+			bool merge = false;
 			int i = skb_shinfo(skb)->nr_frags;
 			struct page *page = sk->sk_sndmsg_page;
 			int off;
@@ -1096,7 +1096,7 @@ new_segment:
 			    off != PAGE_SIZE) {
 				/* We can extend the last page
 				 * fragment. */
-				merge = 1;
+				merge = true;
 			} else if (i == MAX_SKB_FRAGS || !sg) {
 				/* Need to add new fragment and cannot
 				 * do this because interface is non-SG,
@@ -1293,7 +1293,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
 void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int time_to_ack = 0;
+	bool time_to_ack = false;
 
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
@@ -1319,7 +1319,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
 		      !icsk->icsk_ack.pingpong)) &&
 		      !atomic_read(&sk->sk_rmem_alloc)))
-			time_to_ack = 1;
+			time_to_ack = true;
 	}
 
 	/* We send an ACK if we can now advertise a non-zero window
@@ -1341,7 +1341,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1341 * "Lots" means "at least twice" here. 1341 * "Lots" means "at least twice" here.
1342 */ 1342 */
1343 if (new_window && new_window >= 2 * rcv_window_now) 1343 if (new_window && new_window >= 2 * rcv_window_now)
1344 time_to_ack = 1; 1344 time_to_ack = true;
1345 } 1345 }
1346 } 1346 }
1347 if (time_to_ack) 1347 if (time_to_ack)
@@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(tcp_close);
 
 /* These states need RST on ABORT according to RFC793 */
 
-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
 {
 	return (1 << state) &
 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
@@ -2245,7 +2245,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-static inline int tcp_can_repair_sock(struct sock *sk)
+static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return capable(CAP_NET_ADMIN) &&
 		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
@@ -3172,13 +3172,13 @@ out_free:
 struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	struct tcp_md5sig_pool __percpu *pool;
-	int alloc = 0;
+	bool alloc = false;
 
 retry:
 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
-		alloc = 1;
+		alloc = true;
 		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
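A note on why tcp_need_reset() and tcp_can_repair_sock() can return a multi-bit mask through a bool: C99 special-cases conversion to _Bool, so any nonzero value becomes true rather than being truncated. A standalone illustration follows, with made-up mask values rather than the kernel's TCPF_* definitions.

#include <stdbool.h>
#include <stdio.h>

#define F_ESTABLISHED	(1 << 1)
#define F_FIN_WAIT1	(1 << 4)

/* Returning the raw bitmask through a bool return type collapses any
 * nonzero result to 1; a narrower integer type could silently drop
 * the set bit instead. */
static inline bool need_reset(int state_bit)
{
	return state_bit & (F_ESTABLISHED | F_FIN_WAIT1);
}

int main(void)
{
	printf("%d\n", need_reset(F_FIN_WAIT1));	/* prints 1, not 16 */
	printf("%d\n", need_reset(1 << 2));		/* prints 0 */
	return 0;
}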