diff options
author | Ian Morris <ipm@chirality.org.uk> | 2015-04-03 04:17:26 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-04-03 12:11:15 -0400 |
commit | 51456b2914a34d16b1255b7c55d5cbf6a681d306 (patch) | |
tree | b8f1135150269f591105f787fbf7c7d8c2307d3e /net/ipv4/tcp_output.c | |
parent | 11a9c7821c583aa22b35f37fba20539def9e8f14 (diff) |
ipv4: coding style: comparison for equality with NULL
The ipv4 code uses a mixture of coding styles. In some instances the check
for a NULL pointer is written as x == NULL and in others as !x. The !x form
is preferred according to checkpatch, and this patch makes the code
consistent by adopting the latter form.
No changes detected by objdiff.
Signed-off-by: Ian Morris <ipm@chirality.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r-- | net/ipv4/tcp_output.c | 21 |
1 files changed, 11 insertions, 10 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2e69b8d16e68..bdc80734cd2c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -565,7 +565,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
565 | opts->mss = tcp_advertise_mss(sk); | 565 | opts->mss = tcp_advertise_mss(sk); |
566 | remaining -= TCPOLEN_MSS_ALIGNED; | 566 | remaining -= TCPOLEN_MSS_ALIGNED; |
567 | 567 | ||
568 | if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { | 568 | if (likely(sysctl_tcp_timestamps && !*md5)) { |
569 | opts->options |= OPTION_TS; | 569 | opts->options |= OPTION_TS; |
570 | opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; | 570 | opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; |
571 | opts->tsecr = tp->rx_opt.ts_recent; | 571 | opts->tsecr = tp->rx_opt.ts_recent; |
@@ -1148,7 +1148,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, | |||
1148 | 1148 | ||
1149 | /* Get a new skb... force flag on. */ | 1149 | /* Get a new skb... force flag on. */ |
1150 | buff = sk_stream_alloc_skb(sk, nsize, gfp); | 1150 | buff = sk_stream_alloc_skb(sk, nsize, gfp); |
1151 | if (buff == NULL) | 1151 | if (!buff) |
1152 | return -ENOMEM; /* We'll just try again later. */ | 1152 | return -ENOMEM; /* We'll just try again later. */ |
1153 | 1153 | ||
1154 | sk->sk_wmem_queued += buff->truesize; | 1154 | sk->sk_wmem_queued += buff->truesize; |
@@ -1707,7 +1707,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, | |||
1707 | return tcp_fragment(sk, skb, len, mss_now, gfp); | 1707 | return tcp_fragment(sk, skb, len, mss_now, gfp); |
1708 | 1708 | ||
1709 | buff = sk_stream_alloc_skb(sk, 0, gfp); | 1709 | buff = sk_stream_alloc_skb(sk, 0, gfp); |
1710 | if (unlikely(buff == NULL)) | 1710 | if (unlikely(!buff)) |
1711 | return -ENOMEM; | 1711 | return -ENOMEM; |
1712 | 1712 | ||
1713 | sk->sk_wmem_queued += buff->truesize; | 1713 | sk->sk_wmem_queued += buff->truesize; |
@@ -1925,7 +1925,8 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1925 | } | 1925 | } |
1926 | 1926 | ||
1927 | /* We're allowed to probe. Build it now. */ | 1927 | /* We're allowed to probe. Build it now. */ |
1928 | if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) | 1928 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC); |
1929 | if (!nskb) | ||
1929 | return -1; | 1930 | return -1; |
1930 | sk->sk_wmem_queued += nskb->truesize; | 1931 | sk->sk_wmem_queued += nskb->truesize; |
1931 | sk_mem_charge(sk, nskb->truesize); | 1932 | sk_mem_charge(sk, nskb->truesize); |
@@ -2733,7 +2734,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
2733 | if (skb == tcp_send_head(sk)) | 2734 | if (skb == tcp_send_head(sk)) |
2734 | break; | 2735 | break; |
2735 | /* we could do better than to assign each time */ | 2736 | /* we could do better than to assign each time */ |
2736 | if (hole == NULL) | 2737 | if (!hole) |
2737 | tp->retransmit_skb_hint = skb; | 2738 | tp->retransmit_skb_hint = skb; |
2738 | 2739 | ||
2739 | /* Assume this retransmit will generate | 2740 | /* Assume this retransmit will generate |
@@ -2765,7 +2766,7 @@ begin_fwd: | |||
2765 | goto begin_fwd; | 2766 | goto begin_fwd; |
2766 | 2767 | ||
2767 | } else if (!(sacked & TCPCB_LOST)) { | 2768 | } else if (!(sacked & TCPCB_LOST)) { |
2768 | if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) | 2769 | if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) |
2769 | hole = skb; | 2770 | hole = skb; |
2770 | continue; | 2771 | continue; |
2771 | 2772 | ||
@@ -2868,14 +2869,14 @@ int tcp_send_synack(struct sock *sk) | |||
2868 | struct sk_buff *skb; | 2869 | struct sk_buff *skb; |
2869 | 2870 | ||
2870 | skb = tcp_write_queue_head(sk); | 2871 | skb = tcp_write_queue_head(sk); |
2871 | if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { | 2872 | if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { |
2872 | pr_debug("%s: wrong queue state\n", __func__); | 2873 | pr_debug("%s: wrong queue state\n", __func__); |
2873 | return -EFAULT; | 2874 | return -EFAULT; |
2874 | } | 2875 | } |
2875 | if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { | 2876 | if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { |
2876 | if (skb_cloned(skb)) { | 2877 | if (skb_cloned(skb)) { |
2877 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); | 2878 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); |
2878 | if (nskb == NULL) | 2879 | if (!nskb) |
2879 | return -ENOMEM; | 2880 | return -ENOMEM; |
2880 | tcp_unlink_write_queue(skb, sk); | 2881 | tcp_unlink_write_queue(skb, sk); |
2881 | __skb_header_release(nskb); | 2882 | __skb_header_release(nskb); |
@@ -3300,7 +3301,7 @@ void tcp_send_ack(struct sock *sk) | |||
3300 | * sock. | 3301 | * sock. |
3301 | */ | 3302 | */ |
3302 | buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); | 3303 | buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); |
3303 | if (buff == NULL) { | 3304 | if (!buff) { |
3304 | inet_csk_schedule_ack(sk); | 3305 | inet_csk_schedule_ack(sk); |
3305 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; | 3306 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; |
3306 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | 3307 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, |
@@ -3344,7 +3345,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) | |||
3344 | 3345 | ||
3345 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ | 3346 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ |
3346 | skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); | 3347 | skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); |
3347 | if (skb == NULL) | 3348 | if (!skb) |
3348 | return -1; | 3349 | return -1; |
3349 | 3350 | ||
3350 | /* Reserve space for headers and set control bits. */ | 3351 | /* Reserve space for headers and set control bits. */ |