path: root/net/ipv4/tcp_input.c
author		Ian Morris <ipm@chirality.org.uk>	2015-04-03 04:17:26 -0400
committer	David S. Miller <davem@davemloft.net>	2015-04-03 12:11:15 -0400
commit		51456b2914a34d16b1255b7c55d5cbf6a681d306 (patch)
tree		b8f1135150269f591105f787fbf7c7d8c2307d3e /net/ipv4/tcp_input.c
parent		11a9c7821c583aa22b35f37fba20539def9e8f14 (diff)
ipv4: coding style: comparison for equality with NULL
The ipv4 code uses a mixture of coding styles. In some instances the check for a
NULL pointer is written as x == NULL, and in others as !x. !x is preferred
according to checkpatch, and this patch makes the code consistent by adopting
the latter form.

No changes detected by objdiff.

Signed-off-by: Ian Morris <ipm@chirality.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
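For context, both spellings test the same condition; the !x form is simply the
one checkpatch prefers. The following standalone sketch (not kernel code; the
function names are made up for illustration) shows the two equivalent checks
side by side:

/* Minimal standalone sketch (not from the kernel tree) of the two NULL-check
 * styles; checkpatch flags the explicit comparison and prefers the !x form.
 */
#include <stddef.h>
#include <stdio.h>

static void check_verbose(const char *p)
{
	if (p == NULL)		/* explicit comparison to NULL */
		printf("p is NULL\n");
}

static void check_preferred(const char *p)
{
	if (!p)			/* logical negation: the form checkpatch prefers */
		printf("p is NULL\n");
}

int main(void)
{
	check_verbose(NULL);
	check_preferred(NULL);
	return 0;
}

Either version compiles to the same test against zero; the change is purely
stylistic.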
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 18b80e8bc533..1fd283684303 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 /* This must be called before lost_out is incremented */
 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	if ((tp->retransmit_skb_hint == NULL) ||
+	if (!tp->retransmit_skb_hint ||
 	    before(TCP_SKB_CB(skb)->seq,
 		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
 		tp->retransmit_skb_hint = skb;
@@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
 					struct tcp_sacktag_state *state,
 					u32 skip_to_seq)
 {
-	if (next_dup == NULL)
+	if (!next_dup)
 		return skb;
 
 	if (before(next_dup->start_seq, skip_to_seq)) {
@@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 			if (tcp_highest_sack_seq(tp) == cache->end_seq) {
 				/* ...but better entrypoint exists! */
 				skb = tcp_highest_sack(sk);
-				if (skb == NULL)
+				if (!skb)
 					break;
 				state.fack_count = tp->fackets_out;
 				cache++;
@@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
 		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
 			skb = tcp_highest_sack(sk);
-			if (skb == NULL)
+			if (!skb)
 				break;
 			state.fack_count = tp->fackets_out;
 		}
@@ -3698,7 +3698,7 @@ void tcp_parse_options(const struct sk_buff *skb,
 				 */
 				if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
 				    get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
-				    foc == NULL || !th->syn || (opsize & 1))
+				    !foc || !th->syn || (opsize & 1))
 					break;
 				foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
 				if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
@@ -4669,7 +4669,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	struct sk_buff *head;
 	u32 start, end;
 
-	if (skb == NULL)
+	if (!skb)
 		return;
 
 	start = TCP_SKB_CB(skb)->seq;
@@ -5124,7 +5124,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (unlikely(sk->sk_rx_dst == NULL))
+	if (unlikely(!sk->sk_rx_dst))
 		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
 	/*
 	 * Header prediction.
@@ -5694,7 +5694,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
 			     sk->sk_state != TCP_FIN_WAIT1);
 
-		if (tcp_check_req(sk, skb, req, true) == NULL)
+		if (!tcp_check_req(sk, skb, req, true))
 			goto discard;
 	}
 