author		Eric Dumazet <edumazet@google.com>	2012-05-16 19:15:34 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-17 14:59:59 -0400
commit		a2a385d627e1549da4b43a8b3dfe370589766e1c (patch)
tree		d61e9913497c6c14406032f6a0822738707f1abf /include/net
parent		e005d193d55ee5f757b13306112d8c23aac27a88 (diff)
tcp: bool conversions

bool conversions where possible.

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net')
-rw-r--r--	include/net/tcp.h	56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index aaf5de9448c9..e79aa48d9fc1 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -263,14 +263,14 @@ extern int tcp_memory_pressure;
  * and worry about wraparound (automatic with unsigned arithmetic).
  */
 
-static inline int before(__u32 seq1, __u32 seq2)
+static inline bool before(__u32 seq1, __u32 seq2)
 {
 	return (__s32)(seq1-seq2) < 0;
 }
 #define after(seq2, seq1)	before(seq1, seq2)
 
 /* is s2<=s1<=s3 ? */
-static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 {
 	return seq3 - seq2 >= seq1 - seq2;
 }
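The comment above alludes to why this works: the unsigned subtraction wraps mod 2^32, and the __s32 cast makes any sequence number less than 2^31 ahead of seq1 compare as later, even across the wrap point. A minimal user-space sketch of the same arithmetic (standalone names and test values are illustrative, not part of this patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as before() above, spelled with C99 types. */
static bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	printf("%d\n", seq_before(100, 200));           /* 1: plain ordering */
	printf("%d\n", seq_before(0xfffffff0u, 0x10u)); /* 1: 0x10 lies just past the wrap */
	printf("%d\n", seq_before(0x10u, 0xfffffff0u)); /* 0: the reverse does not hold */
	return 0;
}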
@@ -305,7 +305,7 @@ static inline void tcp_synq_overflow(struct sock *sk)
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
-static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 {
 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
@@ -383,7 +383,7 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
 					      struct request_sock **prev);
 extern int tcp_child_process(struct sock *parent, struct sock *child,
 			     struct sk_buff *skb);
-extern int tcp_use_frto(struct sock *sk);
+extern bool tcp_use_frto(struct sock *sk);
 extern void tcp_enter_frto(struct sock *sk);
 extern void tcp_enter_loss(struct sock *sk, int how);
 extern void tcp_clear_retrans(struct tcp_sock *tp);
@@ -470,7 +470,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 				      int nonagle);
-extern int tcp_may_send_now(struct sock *sk);
+extern bool tcp_may_send_now(struct sock *sk);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_retransmit_timer(struct sock *sk);
 extern void tcp_xmit_retransmit_queue(struct sock *);
@@ -484,9 +484,9 @@ extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 extern int tcp_send_synack(struct sock *);
-extern int tcp_syn_flood_action(struct sock *sk,
-				const struct sk_buff *skb,
-				const char *proto);
+extern bool tcp_syn_flood_action(struct sock *sk,
+				 const struct sk_buff *skb,
+				 const char *proto);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
@@ -794,12 +794,12 @@ static inline int tcp_is_sack(const struct tcp_sock *tp)
 	return tp->rx_opt.sack_ok;
 }
 
-static inline int tcp_is_reno(const struct tcp_sock *tp)
+static inline bool tcp_is_reno(const struct tcp_sock *tp)
 {
 	return !tcp_is_sack(tp);
 }
 
-static inline int tcp_is_fack(const struct tcp_sock *tp)
+static inline bool tcp_is_fack(const struct tcp_sock *tp)
 {
 	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 }
@@ -901,7 +901,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
 	return tp->snd_una + tp->snd_wnd;
 }
-extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 
 static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 				       const struct sk_buff *skb)
@@ -944,7 +944,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
 	return __skb_checksum_complete(skb);
 }
 
-static inline int tcp_checksum_complete(struct sk_buff *skb)
+static inline bool tcp_checksum_complete(struct sk_buff *skb)
 {
 	return !skb_csum_unnecessary(skb) &&
 		__tcp_checksum_complete(skb);
@@ -974,12 +974,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
  *
  * NOTE: is this not too big to inline?
  */
-static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
-		return 0;
+		return false;
 
 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 	tp->ucopy.memory += skb->truesize;
@@ -1003,7 +1003,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 					  (3 * tcp_rto_min(sk)) / 4,
 					  TCP_RTO_MAX);
 	}
-	return 1;
+	return true;
 }
 
 
@@ -1108,28 +1108,28 @@ static inline int tcp_fin_time(const struct sock *sk)
 	return fin_timeout;
 }
 
-static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
-				 int paws_win)
+static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
+				  int paws_win)
 {
 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
-		return 1;
+		return true;
 	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
-		return 1;
+		return true;
 	/*
 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
 	 * then following tcp messages have valid values. Ignore 0 value,
 	 * or else 'negative' tsval might forbid us to accept their packets.
 	 */
 	if (!rx_opt->ts_recent)
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
-static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
-				  int rst)
+static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
+				   int rst)
 {
 	if (tcp_paws_check(rx_opt, 0))
-		return 0;
+		return false;
 
 	/* RST segments are not recommended to carry timestamp,
 	   and, if they do, it is recommended to ignore PAWS because
@@ -1144,8 +1144,8 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
 	   However, we can relax time bounds for RST segments to MSL.
 	 */
 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 static inline void tcp_mib_init(struct net *net)
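tcp_paws_check() applies the same signed-difference trick to the timestamp clock: a segment passes when its tsval has fallen no more than paws_win ticks behind ts_recent, and tcp_paws_reject() calls it with paws_win == 0. A hedged user-space sketch of just that first comparison, with the staleness and zero-tsval escape hatches above omitted (names borrowed from tcp_options_received for illustration):

#include <stdbool.h>
#include <stdint.h>

/* First test of tcp_paws_check() in isolation: is the incoming
 * timestamp at most paws_win ticks behind the last one seen? */
static bool paws_ts_ok(uint32_t ts_recent, uint32_t rcv_tsval, int paws_win)
{
	return (int32_t)(ts_recent - rcv_tsval) <= paws_win;
}

/* paws_ts_ok(1000, 1005, 0) -> true  (tsval advanced, segment accepted)
 * paws_ts_ok(1000,  990, 0) -> false (tsval moved backwards: possible
 *                                     old duplicate, so PAWS rejects it) */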
@@ -1383,7 +1383,7 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
 	__skb_unlink(skb, &sk->sk_write_queue);
 }
 
-static inline int tcp_write_queue_empty(struct sock *sk)
+static inline bool tcp_write_queue_empty(struct sock *sk)
 {
 	return skb_queue_empty(&sk->sk_write_queue);
 }
@@ -1440,7 +1440,7 @@ static inline void tcp_highest_sack_combine(struct sock *sk,
 /* Determines whether this is a thin stream (which may suffer from
  * increased latency). Used to trigger latency-reducing mechanisms.
  */
-static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
 {
 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
 }
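For illustration only (not part of the patch): the predicate above treats a flow as thin once fewer than four segments are in flight outside initial slow start, too few for fast retransmit to be triggered by duplicate ACKs. A hypothetical stand-alone restatement:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the tcp_sock fields read by tcp_stream_is_thin(). */
static bool stream_is_thin(uint32_t packets_out, bool in_initial_slowstart)
{
	return packets_out < 4 && !in_initial_slowstart;
}

/* stream_is_thin(3, false) -> true:  steady interactive flow, little data in flight
 * stream_is_thin(3, true)  -> false: still ramping up in slow start
 * stream_is_thin(8, false) -> false: bulk flow with enough packets for fast retransmit */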