author     Ian Morris <ipm@chirality.org.uk>        2015-04-03 04:17:26 -0400
committer  David S. Miller <davem@davemloft.net>    2015-04-03 12:11:15 -0400
commit     51456b2914a34d16b1255b7c55d5cbf6a681d306 (patch)
tree       b8f1135150269f591105f787fbf7c7d8c2307d3e /net/ipv4
parent     11a9c7821c583aa22b35f37fba20539def9e8f14 (diff)
ipv4: coding style: comparison for equality with NULL
The ipv4 code uses a mixture of coding styles. In some instances the check
for a NULL pointer is written as x == NULL, and in others as !x. The !x form
is preferred by checkpatch, so this patch makes the code consistent by
adopting that form throughout.
No changes detected by objdiff.
Signed-off-by: Ian Morris <ipm@chirality.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
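
As an illustration of the style being enforced, the minimal userspace sketch below (a hypothetical example, not taken from the patch or from the kernel tree) shows the two equivalent NULL checks; checkpatch prefers the second form:

/* Hypothetical userspace sketch of the two equivalent NULL checks. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = malloc(16);

	/* Explicit comparison: the style this patch removes. */
	if (buf == NULL)
		return 1;

	free(buf);
	buf = NULL;

	/* Logical negation: the style checkpatch prefers. */
	if (!buf)
		printf("buf is NULL again\n");

	return 0;
}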
Diffstat (limited to 'net/ipv4')
39 files changed, 210 insertions, 202 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 64a9c0fdc4aa..7d3b00c01bc8 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog)
 * shutdown() (rather than close()).
 */
 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
-inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+!inet_csk(sk)->icsk_accept_queue.fastopenq) {
 if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
 err = fastopen_init_queue(sk, backlog);
 else if ((sysctl_tcp_fastopen &
@@ -314,11 +314,11 @@ lookup_protocol:
 answer_flags = answer->flags;
 rcu_read_unlock();

-WARN_ON(answer_prot->slab == NULL);
+WARN_ON(!answer_prot->slab);

 err = -ENOBUFS;
 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
-if (sk == NULL)
+if (!sk)
 goto out;

 err = 0;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 5f5c674e130a..ffe84226a2c8 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -228,7 +228,7 @@ static int arp_constructor(struct neighbour *neigh)

 rcu_read_lock();
 in_dev = __in_dev_get_rcu(dev);
-if (in_dev == NULL) {
+if (!in_dev) {
 rcu_read_unlock();
 return -EINVAL;
 }
@@ -475,7 +475,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev,
 */

 /*
-* Create an arp packet. If (dest_hw == NULL), we create a broadcast
+* Create an arp packet. If dest_hw is not set, we create a broadcast
 * message.
 */
 struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
@@ -495,7 +495,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 */

 skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
-if (skb == NULL)
+if (!skb)
 return NULL;

 skb_reserve(skb, hlen);
@@ -503,9 +503,9 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
 skb->dev = dev;
 skb->protocol = htons(ETH_P_ARP);
-if (src_hw == NULL)
+if (!src_hw)
 src_hw = dev->dev_addr;
-if (dest_hw == NULL)
+if (!dest_hw)
 dest_hw = dev->broadcast;

 /*
@@ -614,7 +614,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,

 skb = arp_create(type, ptype, dest_ip, dev, src_ip,
 dest_hw, src_hw, target_hw);
-if (skb == NULL)
+if (!skb)
 return;

 arp_xmit(skb);
@@ -644,7 +644,7 @@ static int arp_process(struct sk_buff *skb)
 * is ARP'able.
 */

-if (in_dev == NULL)
+if (!in_dev)
 goto out;

 arp = arp_hdr(skb);
@@ -808,7 +808,7 @@ static int arp_process(struct sk_buff *skb)
 is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
 inet_addr_type(net, sip) == RTN_UNICAST;

-if (n == NULL &&
+if (!n &&
 ((arp->ar_op == htons(ARPOP_REPLY) &&
 inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
 n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
@@ -900,7 +900,7 @@ out_of_mem:

 static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
 {
-if (dev == NULL) {
+if (!dev) {
 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
 return 0;
 }
@@ -926,7 +926,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
 return -ENODEV;
 }
 if (mask) {
-if (pneigh_lookup(&arp_tbl, net, &ip, dev, 1) == NULL)
+if (!pneigh_lookup(&arp_tbl, net, &ip, dev, 1))
 return -ENOBUFS;
 return 0;
 }
@@ -947,7 +947,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
 if (r->arp_flags & ATF_PERM)
 r->arp_flags |= ATF_COM;
-if (dev == NULL) {
+if (!dev) {
 struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);

 if (IS_ERR(rt))
@@ -1067,7 +1067,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 return arp_req_delete_public(net, r, dev);

 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
-if (dev == NULL) {
+if (!dev) {
 struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
 if (IS_ERR(rt))
 return PTR_ERR(rt);
@@ -1116,7 +1116,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 if (r.arp_dev[0]) {
 err = -ENODEV;
 dev = __dev_get_by_name(net, r.arp_dev);
-if (dev == NULL)
+if (!dev)
 goto out;

 /* Mmmm... It is wrong... ARPHRD_NETROM==0 */
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index e361ea6f3fc8..1b28e1183c1b 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -255,7 +255,7 @@ static int __init cipso_v4_cache_init(void)
 cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
 sizeof(struct cipso_v4_map_cache_bkt),
 GFP_KERNEL);
-if (cipso_v4_cache == NULL)
+if (!cipso_v4_cache)
 return -ENOMEM;

 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
@@ -339,7 +339,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 secattr->cache = entry->lsm_data;
 secattr->flags |= NETLBL_SECATTR_CACHE;
 secattr->type = NETLBL_NLTYPE_CIPSOV4;
-if (prev_entry == NULL) {
+if (!prev_entry) {
 spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 return 0;
 }
@@ -393,10 +393,10 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
 cipso_ptr_len = cipso_ptr[1];

 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
-if (entry == NULL)
+if (!entry)
 return -ENOMEM;
 entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
-if (entry->key == NULL) {
+if (!entry->key) {
 ret_val = -ENOMEM;
 goto cache_add_failure;
 }
@@ -547,7 +547,7 @@ doi_add_return:
 */
 void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
 {
-if (doi_def == NULL)
+if (!doi_def)
 return;

 switch (doi_def->type) {
@@ -598,7 +598,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)

 spin_lock(&cipso_v4_doi_list_lock);
 doi_def = cipso_v4_doi_search(doi);
-if (doi_def == NULL) {
+if (!doi_def) {
 spin_unlock(&cipso_v4_doi_list_lock);
 ret_val = -ENOENT;
 goto doi_remove_return;
@@ -644,7 +644,7 @@ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)

 rcu_read_lock();
 doi_def = cipso_v4_doi_search(doi);
-if (doi_def == NULL)
+if (!doi_def)
 goto doi_getdef_return;
 if (!atomic_inc_not_zero(&doi_def->refcount))
 doi_def = NULL;
@@ -664,7 +664,7 @@ doi_getdef_return:
 */
 void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
 {
-if (doi_def == NULL)
+if (!doi_def)
 return;

 if (!atomic_dec_and_test(&doi_def->refcount))
@@ -1642,7 +1642,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)

 rcu_read_lock();
 doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
-if (doi_def == NULL) {
+if (!doi_def) {
 err_offset = 2;
 goto validate_return_locked;
 }
@@ -1736,7 +1736,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
 * not the loopback device drop the packet. Further,
 * there is no legitimate reason for setting this from
 * userspace so reject it if skb is NULL. */
-if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
+if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
 err_offset = opt_iter;
 goto validate_return_locked;
 }
@@ -1897,7 +1897,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
 * defined yet but it is not a problem as the only users of these
 * "lite" PF_INET sockets are functions which do an accept() call
 * afterwards so we will label the socket as part of the accept(). */
-if (sk == NULL)
+if (!sk)
 return 0;

 /* We allocate the maximum CIPSO option size here so we are probably
@@ -1905,7 +1905,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
 * on and after all we are only talking about 40 bytes. */
 buf_len = CIPSO_V4_OPT_LEN_MAX;
 buf = kmalloc(buf_len, GFP_ATOMIC);
-if (buf == NULL) {
+if (!buf) {
 ret_val = -ENOMEM;
 goto socket_setattr_failure;
 }
@@ -1921,7 +1921,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
 * set the IPOPT_CIPSO option. */
 opt_len = (buf_len + 3) & ~3;
 opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
-if (opt == NULL) {
+if (!opt) {
 ret_val = -ENOMEM;
 goto socket_setattr_failure;
 }
@@ -1981,7 +1981,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
 * on and after all we are only talking about 40 bytes. */
 buf_len = CIPSO_V4_OPT_LEN_MAX;
 buf = kmalloc(buf_len, GFP_ATOMIC);
-if (buf == NULL) {
+if (!buf) {
 ret_val = -ENOMEM;
 goto req_setattr_failure;
 }
@@ -1997,7 +1997,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
 * set the IPOPT_CIPSO option. */
 opt_len = (buf_len + 3) & ~3;
 opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
-if (opt == NULL) {
+if (!opt) {
 ret_val = -ENOMEM;
 goto req_setattr_failure;
 }
@@ -2102,7 +2102,7 @@ void cipso_v4_sock_delattr(struct sock *sk)

 sk_inet = inet_sk(sk);
 opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
-if (opt == NULL || opt->opt.cipso == 0)
+if (!opt || opt->opt.cipso == 0)
 return;

 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
@@ -2128,7 +2128,7 @@ void cipso_v4_req_delattr(struct request_sock *req)

 req_inet = inet_rsk(req);
 opt = req_inet->opt;
-if (opt == NULL || opt->opt.cipso == 0)
+if (!opt || opt->opt.cipso == 0)
 return;

 cipso_v4_delopt(&req_inet->opt);
@@ -2157,7 +2157,7 @@ int cipso_v4_getattr(const unsigned char *cipso,
 doi = get_unaligned_be32(&cipso[2]);
 rcu_read_lock();
 doi_def = cipso_v4_doi_search(doi);
-if (doi_def == NULL)
+if (!doi_def)
 goto getattr_return;
 /* XXX - This code assumes only one tag per CIPSO option which isn't
 * really a good assumption to make but since we only support the MAC
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c6473f365ad1..0ee21689d37e 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -585,7 +585,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)

 ifm = nlmsg_data(nlh);
 in_dev = inetdev_by_index(net, ifm->ifa_index);
-if (in_dev == NULL) {
+if (!in_dev) {
 err = -ENODEV;
 goto errout;
 }
@@ -755,21 +755,21 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,

 ifm = nlmsg_data(nlh);
 err = -EINVAL;
-if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL)
+if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
 goto errout;

 dev = __dev_get_by_index(net, ifm->ifa_index);
 err = -ENODEV;
-if (dev == NULL)
+if (!dev)
 goto errout;

 in_dev = __in_dev_get_rtnl(dev);
 err = -ENOBUFS;
-if (in_dev == NULL)
+if (!in_dev)
 goto errout;

 ifa = inet_alloc_ifa();
-if (ifa == NULL)
+if (!ifa)
 /*
 * A potential indev allocation can be left alive, it stays
 * assigned to its device and is destroy with it.
@@ -780,7 +780,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
 neigh_parms_data_state_setall(in_dev->arp_parms);
 in_dev_hold(in_dev);

-if (tb[IFA_ADDRESS] == NULL)
+if (!tb[IFA_ADDRESS])
 tb[IFA_ADDRESS] = tb[IFA_LOCAL];

 INIT_HLIST_NODE(&ifa->hash);
@@ -1340,7 +1340,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
 if (named++ == 0)
 goto skip;
 dot = strchr(old, ':');
-if (dot == NULL) {
+if (!dot) {
 sprintf(old, ":%d", named);
 dot = old;
 }
@@ -1509,7 +1509,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
 u32 preferred, valid;

 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
-if (nlh == NULL)
+if (!nlh)
 return -EMSGSIZE;

 ifm = nlmsg_data(nlh);
@@ -1628,7 +1628,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,

 net = dev_net(ifa->ifa_dev->dev);
 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
-if (skb == NULL)
+if (!skb)
 goto errout;

 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
@@ -1665,7 +1665,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
 return -ENODATA;

 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
-if (nla == NULL)
+if (!nla)
 return -EMSGSIZE;

 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
@@ -1754,7 +1754,7 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,

 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
 flags);
-if (nlh == NULL)
+if (!nlh)
 return -EMSGSIZE;

 ncm = nlmsg_data(nlh);
@@ -1796,7 +1796,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
 int err = -ENOBUFS;

 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
-if (skb == NULL)
+if (!skb)
 goto errout;

 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
@@ -1853,10 +1853,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
 break;
 default:
 dev = __dev_get_by_index(net, ifindex);
-if (dev == NULL)
+if (!dev)
 goto errout;
 in_dev = __in_dev_get_rtnl(dev);
-if (in_dev == NULL)
+if (!in_dev)
 goto errout;
 devconf = &in_dev->cnf;
 break;
@@ -1864,7 +1864,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,

 err = -ENOBUFS;
 skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
-if (skb == NULL)
+if (!skb)
 goto errout;

 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
@@ -2215,7 +2215,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
 {
 struct devinet_sysctl_table *t = cnf->sysctl;

-if (t == NULL)
+if (!t)
 return;

 cnf->sysctl = NULL;
@@ -2276,16 +2276,16 @@ static __net_init int devinet_init_net(struct net *net)

 if (!net_eq(net, &init_net)) {
 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
-if (all == NULL)
+if (!all)
 goto err_alloc_all;

 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
-if (dflt == NULL)
+if (!dflt)
 goto err_alloc_dflt;

 #ifdef CONFIG_SYSCTL
 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
-if (tbl == NULL)
+if (!tbl)
 goto err_alloc_ctl;

 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
@@ -2305,7 +2305,7 @@ static __net_init int devinet_init_net(struct net *net)

 err = -ENOMEM;
 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
-if (forw_hdr == NULL)
+if (!forw_hdr)
 goto err_reg_ctl;
 net->ipv4.forw_hdr = forw_hdr;
 #endif
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 60173d4d3a0e..421a80b09b62 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -553,7 +553,7 @@ static int esp_init_authenc(struct xfrm_state *x)
 int err;

 err = -EINVAL;
-if (x->ealg == NULL)
+if (!x->ealg)
 goto error;

 err = -ENAMETOOLONG;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 718b0a16ea40..2166d2bf1562 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -53,11 +53,11 @@ static int __net_init fib4_rules_init(struct net *net)
 struct fib_table *local_table, *main_table;

 main_table = fib_trie_table(RT_TABLE_MAIN, NULL);
-if (main_table == NULL)
+if (!main_table)
 return -ENOMEM;

 local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
-if (local_table == NULL)
+if (!local_table)
 goto fail;

 hlist_add_head_rcu(&local_table->tb_hlist,
@@ -486,7 +486,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
 if (strcmp(ifa->ifa_label, devname) == 0)
 break;
-if (ifa == NULL)
+if (!ifa)
 return -ENODEV;
 cfg->fc_prefsrc = ifa->ifa_local;
 }
@@ -514,7 +514,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
 int len = 0;

 mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
-if (mx == NULL)
+if (!mx)
 return -ENOMEM;

 if (rt->rt_flags & RTF_MTU)
@@ -676,7 +676,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
 goto errout;

 tb = fib_get_table(net, cfg.fc_table);
-if (tb == NULL) {
+if (!tb) {
 err = -ESRCH;
 goto errout;
 }
@@ -698,7 +698,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
 goto errout;

 tb = fib_new_table(net, cfg.fc_table);
-if (tb == NULL) {
+if (!tb) {
 err = -ENOBUFS;
 goto errout;
 }
@@ -779,7 +779,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
 else
 tb = fib_new_table(net, RT_TABLE_LOCAL);

-if (tb == NULL)
+if (!tb)
 return;

 cfg.fc_table = tb->tb_id;
@@ -806,7 +806,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)

 if (ifa->ifa_flags & IFA_F_SECONDARY) {
 prim = inet_ifa_byprefix(in_dev, prefix, mask);
-if (prim == NULL) {
+if (!prim) {
 pr_warn("%s: bug: prim == NULL\n", __func__);
 return;
 }
@@ -860,7 +860,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)

 if (ifa->ifa_flags & IFA_F_SECONDARY) {
 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
-if (prim == NULL) {
+if (!prim) {
 pr_warn("%s: bug: prim == NULL\n", __func__);
 return;
 }
@@ -1030,7 +1030,7 @@ static void nl_fib_input(struct sk_buff *skb)
 return;

 skb = netlink_skb_clone(skb, GFP_KERNEL);
-if (skb == NULL)
+if (!skb)
 return;
 nlh = nlmsg_hdr(skb);

@@ -1051,7 +1051,7 @@ static int __net_init nl_fib_lookup_init(struct net *net)
 };

 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
-if (sk == NULL)
+if (!sk)
 return -EAFNOSUPPORT;
 net->ipv4.fibnl = sk;
 return 0;
@@ -1089,7 +1089,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 case NETDEV_DOWN:
 fib_del_ifaddr(ifa, NULL);
 atomic_inc(&net->ipv4.dev_addr_genid);
-if (ifa->ifa_dev->ifa_list == NULL) {
+if (!ifa->ifa_dev->ifa_list) {
 /* Last address was deleted from this interface.
 * Disable IP.
 */
@@ -1157,7 +1157,7 @@ static int __net_init ip_fib_net_init(struct net *net)
 size = max_t(size_t, size, L1_CACHE_BYTES);

 net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
-if (net->ipv4.fib_table_hash == NULL)
+if (!net->ipv4.fib_table_hash)
 return -ENOMEM;

 err = fib4_rules_init(net);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 8162dd8e86d7..56151982f74e 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -153,7 +153,7 @@ static struct fib_table *fib_empty_table(struct net *net)
 u32 id;

 for (id = 1; id <= RT_TABLE_MAX; id++)
-if (fib_get_table(net, id) == NULL)
+if (!fib_get_table(net, id))
 return fib_new_table(net, id);
 return NULL;
 }
@@ -184,7 +184,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
 struct fib_table *table;

 table = fib_empty_table(net);
-if (table == NULL) {
+if (!table) {
 err = -ENOBUFS;
 goto errout;
 }
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index eac5aec7772a..8d695b6659c7 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -390,7 +390,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
 int err = -ENOBUFS;

 skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
-if (skb == NULL)
+if (!skb)
 goto errout;

 err = fib_dump_info(skb, info->portid, seq, event, tb_id,
@@ -503,7 +503,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 }

 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-if (cfg->fc_mp == NULL)
+if (!cfg->fc_mp)
 return 0;

 rtnh = cfg->fc_mp;
@@ -646,7 +646,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
 rcu_read_lock();
 err = -ENODEV;
 in_dev = inetdev_by_index(net, nh->nh_oif);
-if (in_dev == NULL)
+if (!in_dev)
 goto out;
 err = -ENETDOWN;
 if (!(in_dev->dev->flags & IFF_UP))
@@ -803,7 +803,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 }

 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
-if (fi == NULL)
+if (!fi)
 goto failure;
 fib_info_cnt++;
 if (cfg->fc_mx) {
@@ -921,7 +921,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 nh->nh_scope = RT_SCOPE_NOWHERE;
 nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
 err = -ENODEV;
-if (nh->nh_dev == NULL)
+if (!nh->nh_dev)
 goto failure;
 } else {
 change_nexthops(fi) {
@@ -995,7 +995,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 struct rtmsg *rtm;

 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
-if (nlh == NULL)
+if (!nlh)
 return -EMSGSIZE;

 rtm = nlmsg_data(nlh);
@@ -1045,12 +1045,12 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 struct nlattr *mp;

 mp = nla_nest_start(skb, RTA_MULTIPATH);
-if (mp == NULL)
+if (!mp)
 goto nla_put_failure;

 for_nexthops(fi) {
 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
-if (rtnh == NULL)
+if (!rtnh)
 goto nla_put_failure;

 rtnh->rtnh_flags = nh->nh_flags & 0xFF;
@@ -1093,7 +1093,7 @@ int fib_sync_down_addr(struct net *net, __be32 local)
 struct hlist_head *head = &fib_info_laddrhash[hash];
 struct fib_info *fi;

-if (fib_info_laddrhash == NULL || local == 0)
+if (!fib_info_laddrhash || local == 0)
 return 0;

 hlist_for_each_entry(fi, head, fib_lhash) {
@@ -1182,7 +1182,7 @@ void fib_select_default(struct fib_result *res)

 fib_alias_accessed(fa);

-if (fi == NULL) {
+if (!fi) {
 if (next_fi != res->fi)
 break;
 } else if (!fib_detect_death(fi, order, &last_resort,
@@ -1195,7 +1195,7 @@ void fib_select_default(struct fib_result *res)
 order++;
 }

-if (order <= 0 || fi == NULL) {
+if (order <= 0 || !fi) {
 tb->tb_default = -1;
 goto out;
 }
@@ -1251,7 +1251,7 @@ int fib_sync_up(struct net_device *dev)
 alive++;
 continue;
 }
-if (nexthop_nh->nh_dev == NULL ||
+if (!nexthop_nh->nh_dev ||
 !(nexthop_nh->nh_dev->flags & IFF_UP))
 continue;
 if (nexthop_nh->nh_dev != dev ||
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2c7c299ee2b9..9e4a3e3423b4 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -391,9 +391,9 @@ static void put_child(struct key_vector *tn, unsigned long i,
 BUG_ON(i >= child_length(tn));

 /* update emptyChildren, overflow into fullChildren */
-if (n == NULL && chi != NULL)
+if (!n && chi != NULL)
 empty_child_inc(tn);
-if (n != NULL && chi == NULL)
+if (n != NULL && !chi)
 empty_child_dec(tn);

 /* update fullChildren */
@@ -528,7 +528,7 @@ static struct key_vector *inflate(struct trie *t,
 unsigned long j, k;

 /* An empty child */
-if (inode == NULL)
+if (!inode)
 continue;

 /* A leaf or an internal node with skipped bits */
@@ -1154,7 +1154,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 }
 err = -ENOBUFS;
 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-if (new_fa == NULL)
+if (!new_fa)
 goto out;

 fi_drop = fa->fa_info;
@@ -1204,7 +1204,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)

 err = -ENOBUFS;
 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-if (new_fa == NULL)
+if (!new_fa)
 goto out;

 new_fa->fa_info = fi;
@@ -1975,7 +1975,7 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
 sz += sizeof(struct trie);

 tb = kzalloc(sz, GFP_KERNEL);
-if (tb == NULL)
+if (!tb)
 return NULL;

 tb->tb_id = id;
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 5a4828ba05ad..a7d8be3dd3de 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -196,7 +196,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,

 rcu_read_lock();
 ptype = gro_find_receive_by_type(type);
-if (ptype == NULL) {
+if (!ptype) {
 flush = 1;
 goto out_unlock;
 }
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 51973ddc05a6..9358f11aae40 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -149,7 +149,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,

 rcu_read_lock();
 ptype = gro_find_receive_by_type(type);
-if (ptype == NULL)
+if (!ptype)
 goto out_unlock;

 grehlen = GRE_HEADER_SECTION;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5e564014a0b7..f5203fba6236 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -399,7 +399,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 return;

 sk = icmp_xmit_lock(net);
-if (sk == NULL)
+if (!sk)
 return;
 inet = inet_sk(sk);

@@ -609,7 +609,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 skb_in->data,
 sizeof(_inner_type),
 &_inner_type);
-if (itp == NULL)
+if (!itp)
 goto out;

 /*
@@ -627,7 +627,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 return;

 sk = icmp_xmit_lock(net);
-if (sk == NULL)
+if (!sk)
 goto out_free;

 /*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index ad09213ac5b2..27d204b834f9 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -692,7 +692,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 hlen = LL_RESERVED_SPACE(dev);
 tlen = dev->needed_tailroom;
 skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
-if (skb == NULL) {
+if (!skb) {
 ip_rt_put(rt);
 return -1;
 }
@@ -981,7 +981,7 @@ int igmp_rcv(struct sk_buff *skb)
 int len = skb->len;
 bool dropped = true;

-if (in_dev == NULL)
+if (!in_dev)
 goto drop;

 if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
@@ -1888,7 +1888,7 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
 if (count >= sysctl_igmp_max_memberships)
 goto done;
 iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
-if (iml == NULL)
+if (!iml)
 goto done;

 memcpy(&iml->multi, imr, sizeof(*imr));
@@ -1909,7 +1909,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
 int err;

-if (psf == NULL) {
+if (!psf) {
 /* any-source empty exclude case */
 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
 iml->sfmode, 0, NULL, 0);
@@ -2360,7 +2360,7 @@ void ip_mc_drop_socket(struct sock *sk)
 struct ip_mc_socklist *iml;
 struct net *net = sock_net(sk);

-if (inet->mc_list == NULL)
+if (!inet->mc_list)
 return;

 rtnl_lock();
@@ -2587,7 +2587,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
 for_each_netdev_rcu(net, state->dev) {
 struct in_device *idev;
 idev = __in_dev_get_rcu(state->dev);
-if (unlikely(idev == NULL))
+if (unlikely(!idev))
 continue;
 im = rcu_dereference(idev->mc_list);
 if (likely(im != NULL)) {
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index e7920352646a..5e346a082e5f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -385,7 +385,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 }

 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
-if (q == NULL)
+if (!q)
 return NULL;

 q->net = nf;
@@ -406,7 +406,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
 struct inet_frag_queue *q;

 q = inet_frag_alloc(nf, f, arg);
-if (q == NULL)
+if (!q)
 return NULL;

 return inet_frag_intern(nf, q, f, arg);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 145a50c4d566..5a6cf8667a9d 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 goto err;

 err = -ENOMEM;
-if (pskb_pull(skb, ihl) == NULL)
+if (!pskb_pull(skb, ihl))
 goto err;

 err = pskb_trim_rcsum(skb, end - offset);
@@ -537,7 +537,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 qp->q.fragments = head;
 }

-WARN_ON(head == NULL);
+WARN_ON(!head);
 WARN_ON(FRAG_CB(head)->offset != 0);

 /* Allocate a new buffer for the datagram. */
@@ -559,7 +559,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 struct sk_buff *clone;
 int i, plen = 0;

-if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
+clone = alloc_skb(0, GFP_ATOMIC);
+if (!clone)
 goto out_nomem;
 clone->next = head->next;
 head->next = clone;
@@ -754,7 +755,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
 table = ip4_frags_ns_ctl_table;
 if (!net_eq(net, &init_net)) {
 table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
-if (table == NULL)
+if (!table)
 goto err_alloc;

 table[0].data = &net->ipv4.frags.high_thresh;
@@ -770,7 +771,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
 }

 hdr = register_net_sysctl(net, "net/ipv4", table);
-if (hdr == NULL)
+if (!hdr)
 goto err_reg;

 net->ipv4.frags_hdr = hdr;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1060ca0bc23a..5fd706473c73 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -182,7 +182,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
 t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 iph->daddr, iph->saddr, tpi->key);

-if (t == NULL)
+if (!t)
 return PACKET_REJECT;

 if (t->parms.iph.daddr == 0 ||
@@ -423,7 +423,7 @@ static int ipgre_open(struct net_device *dev)
 return -EADDRNOTAVAIL;
 dev = rt->dst.dev;
 ip_rt_put(rt);
-if (__in_dev_get_rtnl(dev) == NULL)
+if (!__in_dev_get_rtnl(dev))
 return -EADDRNOTAVAIL;
 t->mlink = dev->ifindex;
 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 3d4da2c16b6a..00bed6fe3b66 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -314,7 +314,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
 const struct iphdr *iph = ip_hdr(skb);
 struct rtable *rt;

-if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
+if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
 const struct net_protocol *ipprot;
 int protocol = iph->protocol;

@@ -387,7 +387,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,

 IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);

-if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+skb = skb_share_check(skb, GFP_ATOMIC);
+if (!skb) {
 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
 goto out;
 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8259e777b249..561d67b2ac74 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -182,7 +182,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 struct sk_buff *skb2;

 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
-if (skb2 == NULL) {
+if (!skb2) {
 kfree_skb(skb);
 return -ENOMEM;
 }
@@ -381,7 +381,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)

 /* Make sure we can route this packet. */
 rt = (struct rtable *)__sk_dst_check(sk, 0);
-if (rt == NULL) {
+if (!rt) {
 __be32 daddr;

 /* Use correct destination address if we have options. */
@@ -790,12 +790,13 @@ static inline int ip_ufo_append_data(struct sock *sk,
 * device, so create one single skb packet containing complete
 * udp datagram
 */
-if ((skb = skb_peek_tail(queue)) == NULL) {
+skb = skb_peek_tail(queue);
+if (!skb) {
 skb = sock_alloc_send_skb(sk,
 hh_len + fragheaderlen + transhdrlen + 20,
 (flags & MSG_DONTWAIT), &err);

-if (skb == NULL)
+if (!skb)
 return err;

 /* reserve space for Hardware header */
@@ -961,10 +962,10 @@ alloc_new_skb:
 skb = sock_wmalloc(sk,
 alloclen + hh_len + 15, 1,
 sk->sk_allocation);
-if (unlikely(skb == NULL))
+if (unlikely(!skb))
 err = -ENOBUFS;
 }
-if (skb == NULL)
+if (!skb)
 goto error;

 /*
@@ -1088,10 +1089,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 */
 opt = ipc->opt;
 if (opt) {
-if (cork->opt == NULL) {
+if (!cork->opt) {
 cork->opt = kmalloc(sizeof(struct ip_options) + 40,
 sk->sk_allocation);
-if (unlikely(cork->opt == NULL))
+if (unlikely(!cork->opt))
 return -ENOBUFS;
 }
 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
@@ -1198,7 +1199,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 return -EMSGSIZE;
 }

-if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+skb = skb_peek_tail(&sk->sk_write_queue);
+if (!skb)
 return -EINVAL;

 cork->length += size;
@@ -1329,7 +1331,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 __be16 df = 0;
 __u8 ttl;

-if ((skb = __skb_dequeue(queue)) == NULL)
+skb = __skb_dequeue(queue);
+if (!skb)
 goto out;
 tail_skb = &(skb_shinfo(skb)->frag_list);

diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f6a0d54b308a..f64b1b24c64f 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -351,7 +351,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, | |||
351 | return 0; | 351 | return 0; |
352 | } | 352 | } |
353 | } | 353 | } |
354 | if (new_ra == NULL) { | 354 | if (!new_ra) { |
355 | spin_unlock_bh(&ip_ra_lock); | 355 | spin_unlock_bh(&ip_ra_lock); |
356 | return -ENOBUFS; | 356 | return -ENOBUFS; |
357 | } | 357 | } |
@@ -482,7 +482,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
482 | 482 | ||
483 | err = -EAGAIN; | 483 | err = -EAGAIN; |
484 | skb = sock_dequeue_err_skb(sk); | 484 | skb = sock_dequeue_err_skb(sk); |
485 | if (skb == NULL) | 485 | if (!skb) |
486 | goto out; | 486 | goto out; |
487 | 487 | ||
488 | copied = skb->len; | 488 | copied = skb->len; |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 4bb7252110a6..31eaa9ba1803 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -654,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
654 | if (dst == 0) { | 654 | if (dst == 0) { |
655 | /* NBMA tunnel */ | 655 | /* NBMA tunnel */ |
656 | 656 | ||
657 | if (skb_dst(skb) == NULL) { | 657 | if (!skb_dst(skb)) { |
658 | dev->stats.tx_fifo_errors++; | 658 | dev->stats.tx_fifo_errors++; |
659 | goto tx_error; | 659 | goto tx_error; |
660 | } | 660 | } |
@@ -672,7 +672,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
672 | 672 | ||
673 | neigh = dst_neigh_lookup(skb_dst(skb), | 673 | neigh = dst_neigh_lookup(skb_dst(skb), |
674 | &ipv6_hdr(skb)->daddr); | 674 | &ipv6_hdr(skb)->daddr); |
675 | if (neigh == NULL) | 675 | if (!neigh) |
676 | goto tx_error; | 676 | goto tx_error; |
677 | 677 | ||
678 | addr6 = (const struct in6_addr *)&neigh->primary_key; | 678 | addr6 = (const struct in6_addr *)&neigh->primary_key; |
@@ -843,7 +843,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) | |||
843 | case SIOCGETTUNNEL: | 843 | case SIOCGETTUNNEL: |
844 | if (dev == itn->fb_tunnel_dev) { | 844 | if (dev == itn->fb_tunnel_dev) { |
845 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); | 845 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); |
846 | if (t == NULL) | 846 | if (!t) |
847 | t = netdev_priv(dev); | 847 | t = netdev_priv(dev); |
848 | } | 848 | } |
849 | memcpy(p, &t->parms, sizeof(*p)); | 849 | memcpy(p, &t->parms, sizeof(*p)); |
@@ -914,7 +914,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) | |||
914 | if (dev == itn->fb_tunnel_dev) { | 914 | if (dev == itn->fb_tunnel_dev) { |
915 | err = -ENOENT; | 915 | err = -ENOENT; |
916 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); | 916 | t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); |
917 | if (t == NULL) | 917 | if (!t) |
918 | goto done; | 918 | goto done; |
919 | err = -EPERM; | 919 | err = -EPERM; |
920 | if (t == netdev_priv(itn->fb_tunnel_dev)) | 920 | if (t == netdev_priv(itn->fb_tunnel_dev)) |
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index c0855d50a3fa..d97f4f2787f5 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c | |||
@@ -63,7 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) | |||
63 | struct xfrm_state *t; | 63 | struct xfrm_state *t; |
64 | 64 | ||
65 | t = xfrm_state_alloc(net); | 65 | t = xfrm_state_alloc(net); |
66 | if (t == NULL) | 66 | if (!t) |
67 | goto out; | 67 | goto out; |
68 | 68 | ||
69 | t->id.proto = IPPROTO_IPIP; | 69 | t->id.proto = IPPROTO_IPIP; |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index b26376ef87f6..8e7328c6a390 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -504,7 +504,8 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
504 | if (!net_eq(dev_net(dev), &init_net)) | 504 | if (!net_eq(dev_net(dev), &init_net)) |
505 | goto drop; | 505 | goto drop; |
506 | 506 | ||
507 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) | 507 | skb = skb_share_check(skb, GFP_ATOMIC); |
508 | if (!skb) | ||
508 | return NET_RX_DROP; | 509 | return NET_RX_DROP; |
509 | 510 | ||
510 | if (!pskb_may_pull(skb, sizeof(struct arphdr))) | 511 | if (!pskb_may_pull(skb, sizeof(struct arphdr))) |
@@ -958,7 +959,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
958 | if (skb->pkt_type == PACKET_OTHERHOST) | 959 | if (skb->pkt_type == PACKET_OTHERHOST) |
959 | goto drop; | 960 | goto drop; |
960 | 961 | ||
961 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) | 962 | skb = skb_share_check(skb, GFP_ATOMIC); |
963 | if (!skb) | ||
962 | return NET_RX_DROP; | 964 | return NET_RX_DROP; |
963 | 965 | ||
964 | if (!pskb_may_pull(skb, | 966 | if (!pskb_may_pull(skb, |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 5c81f6e40842..ff96396ebec5 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -144,7 +144,7 @@ static int ipip_err(struct sk_buff *skb, u32 info) | |||
144 | err = -ENOENT; | 144 | err = -ENOENT; |
145 | t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, | 145 | t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, |
146 | iph->daddr, iph->saddr, 0); | 146 | iph->daddr, iph->saddr, 0); |
147 | if (t == NULL) | 147 | if (!t) |
148 | goto out; | 148 | goto out; |
149 | 149 | ||
150 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { | 150 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 3ef30cf57f4a..a170e4bc9006 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -189,7 +189,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
189 | } | 189 | } |
190 | 190 | ||
191 | mrt = ipmr_get_table(rule->fr_net, rule->table); | 191 | mrt = ipmr_get_table(rule->fr_net, rule->table); |
192 | if (mrt == NULL) | 192 | if (!mrt) |
193 | return -EAGAIN; | 193 | return -EAGAIN; |
194 | res->mrt = mrt; | 194 | res->mrt = mrt; |
195 | return 0; | 195 | return 0; |
@@ -253,7 +253,7 @@ static int __net_init ipmr_rules_init(struct net *net) | |||
253 | INIT_LIST_HEAD(&net->ipv4.mr_tables); | 253 | INIT_LIST_HEAD(&net->ipv4.mr_tables); |
254 | 254 | ||
255 | mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); | 255 | mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); |
256 | if (mrt == NULL) { | 256 | if (!mrt) { |
257 | err = -ENOMEM; | 257 | err = -ENOMEM; |
258 | goto err1; | 258 | goto err1; |
259 | } | 259 | } |
@@ -320,7 +320,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) | |||
320 | return mrt; | 320 | return mrt; |
321 | 321 | ||
322 | mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); | 322 | mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); |
323 | if (mrt == NULL) | 323 | if (!mrt) |
324 | return NULL; | 324 | return NULL; |
325 | write_pnet(&mrt->net, net); | 325 | write_pnet(&mrt->net, net); |
326 | mrt->id = id; | 326 | mrt->id = id; |
@@ -422,7 +422,7 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) | |||
422 | dev->flags |= IFF_MULTICAST; | 422 | dev->flags |= IFF_MULTICAST; |
423 | 423 | ||
424 | in_dev = __in_dev_get_rtnl(dev); | 424 | in_dev = __in_dev_get_rtnl(dev); |
425 | if (in_dev == NULL) | 425 | if (!in_dev) |
426 | goto failure; | 426 | goto failure; |
427 | 427 | ||
428 | ipv4_devconf_setall(in_dev); | 428 | ipv4_devconf_setall(in_dev); |
@@ -506,7 +506,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) | |||
506 | 506 | ||
507 | dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup); | 507 | dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup); |
508 | 508 | ||
509 | if (dev == NULL) | 509 | if (!dev) |
510 | return NULL; | 510 | return NULL; |
511 | 511 | ||
512 | dev_net_set(dev, net); | 512 | dev_net_set(dev, net); |
@@ -762,7 +762,7 @@ static int vif_add(struct net *net, struct mr_table *mrt, | |||
762 | case 0: | 762 | case 0: |
763 | if (vifc->vifc_flags == VIFF_USE_IFINDEX) { | 763 | if (vifc->vifc_flags == VIFF_USE_IFINDEX) { |
764 | dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); | 764 | dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); |
765 | if (dev && __in_dev_get_rtnl(dev) == NULL) { | 765 | if (dev && !__in_dev_get_rtnl(dev)) { |
766 | dev_put(dev); | 766 | dev_put(dev); |
767 | return -EADDRNOTAVAIL; | 767 | return -EADDRNOTAVAIL; |
768 | } | 768 | } |
@@ -1008,7 +1008,7 @@ static int ipmr_cache_report(struct mr_table *mrt, | |||
1008 | 1008 | ||
1009 | rcu_read_lock(); | 1009 | rcu_read_lock(); |
1010 | mroute_sk = rcu_dereference(mrt->mroute_sk); | 1010 | mroute_sk = rcu_dereference(mrt->mroute_sk); |
1011 | if (mroute_sk == NULL) { | 1011 | if (!mroute_sk) { |
1012 | rcu_read_unlock(); | 1012 | rcu_read_unlock(); |
1013 | kfree_skb(skb); | 1013 | kfree_skb(skb); |
1014 | return -EINVAL; | 1014 | return -EINVAL; |
@@ -1161,7 +1161,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, | |||
1161 | return -EINVAL; | 1161 | return -EINVAL; |
1162 | 1162 | ||
1163 | c = ipmr_cache_alloc(); | 1163 | c = ipmr_cache_alloc(); |
1164 | if (c == NULL) | 1164 | if (!c) |
1165 | return -ENOMEM; | 1165 | return -ENOMEM; |
1166 | 1166 | ||
1167 | c->mfc_origin = mfc->mfcc_origin.s_addr; | 1167 | c->mfc_origin = mfc->mfcc_origin.s_addr; |
@@ -1283,7 +1283,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi | |||
1283 | return -EOPNOTSUPP; | 1283 | return -EOPNOTSUPP; |
1284 | 1284 | ||
1285 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1285 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1286 | if (mrt == NULL) | 1286 | if (!mrt) |
1287 | return -ENOENT; | 1287 | return -ENOENT; |
1288 | 1288 | ||
1289 | if (optname != MRT_INIT) { | 1289 | if (optname != MRT_INIT) { |
@@ -1446,7 +1446,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int | |||
1446 | return -EOPNOTSUPP; | 1446 | return -EOPNOTSUPP; |
1447 | 1447 | ||
1448 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1448 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1449 | if (mrt == NULL) | 1449 | if (!mrt) |
1450 | return -ENOENT; | 1450 | return -ENOENT; |
1451 | 1451 | ||
1452 | if (optname != MRT_VERSION && | 1452 | if (optname != MRT_VERSION && |
@@ -1492,7 +1492,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | |||
1492 | struct mr_table *mrt; | 1492 | struct mr_table *mrt; |
1493 | 1493 | ||
1494 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1494 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1495 | if (mrt == NULL) | 1495 | if (!mrt) |
1496 | return -ENOENT; | 1496 | return -ENOENT; |
1497 | 1497 | ||
1498 | switch (cmd) { | 1498 | switch (cmd) { |
@@ -1566,7 +1566,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) | |||
1566 | struct mr_table *mrt; | 1566 | struct mr_table *mrt; |
1567 | 1567 | ||
1568 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); | 1568 | mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); |
1569 | if (mrt == NULL) | 1569 | if (!mrt) |
1570 | return -ENOENT; | 1570 | return -ENOENT; |
1571 | 1571 | ||
1572 | switch (cmd) { | 1572 | switch (cmd) { |
@@ -1701,7 +1701,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
1701 | struct flowi4 fl4; | 1701 | struct flowi4 fl4; |
1702 | int encap = 0; | 1702 | int encap = 0; |
1703 | 1703 | ||
1704 | if (vif->dev == NULL) | 1704 | if (!vif->dev) |
1705 | goto out_free; | 1705 | goto out_free; |
1706 | 1706 | ||
1707 | #ifdef CONFIG_IP_PIMSM | 1707 | #ifdef CONFIG_IP_PIMSM |
@@ -1992,7 +1992,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
1992 | 1992 | ||
1993 | /* already under rcu_read_lock() */ | 1993 | /* already under rcu_read_lock() */ |
1994 | cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); | 1994 | cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); |
1995 | if (cache == NULL) { | 1995 | if (!cache) { |
1996 | int vif = ipmr_find_vif(mrt, skb->dev); | 1996 | int vif = ipmr_find_vif(mrt, skb->dev); |
1997 | 1997 | ||
1998 | if (vif >= 0) | 1998 | if (vif >= 0) |
@@ -2003,13 +2003,13 @@ int ip_mr_input(struct sk_buff *skb) | |||
2003 | /* | 2003 | /* |
2004 | * No usable cache entry | 2004 | * No usable cache entry |
2005 | */ | 2005 | */ |
2006 | if (cache == NULL) { | 2006 | if (!cache) { |
2007 | int vif; | 2007 | int vif; |
2008 | 2008 | ||
2009 | if (local) { | 2009 | if (local) { |
2010 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 2010 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
2011 | ip_local_deliver(skb); | 2011 | ip_local_deliver(skb); |
2012 | if (skb2 == NULL) | 2012 | if (!skb2) |
2013 | return -ENOBUFS; | 2013 | return -ENOBUFS; |
2014 | skb = skb2; | 2014 | skb = skb2; |
2015 | } | 2015 | } |
@@ -2068,7 +2068,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, | |||
2068 | reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; | 2068 | reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; |
2069 | read_unlock(&mrt_lock); | 2069 | read_unlock(&mrt_lock); |
2070 | 2070 | ||
2071 | if (reg_dev == NULL) | 2071 | if (!reg_dev) |
2072 | return 1; | 2072 | return 1; |
2073 | 2073 | ||
2074 | skb->mac_header = skb->network_header; | 2074 | skb->mac_header = skb->network_header; |
@@ -2198,18 +2198,18 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, | |||
2198 | int err; | 2198 | int err; |
2199 | 2199 | ||
2200 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | 2200 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); |
2201 | if (mrt == NULL) | 2201 | if (!mrt) |
2202 | return -ENOENT; | 2202 | return -ENOENT; |
2203 | 2203 | ||
2204 | rcu_read_lock(); | 2204 | rcu_read_lock(); |
2205 | cache = ipmr_cache_find(mrt, saddr, daddr); | 2205 | cache = ipmr_cache_find(mrt, saddr, daddr); |
2206 | if (cache == NULL && skb->dev) { | 2206 | if (!cache && skb->dev) { |
2207 | int vif = ipmr_find_vif(mrt, skb->dev); | 2207 | int vif = ipmr_find_vif(mrt, skb->dev); |
2208 | 2208 | ||
2209 | if (vif >= 0) | 2209 | if (vif >= 0) |
2210 | cache = ipmr_cache_find_any(mrt, daddr, vif); | 2210 | cache = ipmr_cache_find_any(mrt, daddr, vif); |
2211 | } | 2211 | } |
2212 | if (cache == NULL) { | 2212 | if (!cache) { |
2213 | struct sk_buff *skb2; | 2213 | struct sk_buff *skb2; |
2214 | struct iphdr *iph; | 2214 | struct iphdr *iph; |
2215 | struct net_device *dev; | 2215 | struct net_device *dev; |
@@ -2267,7 +2267,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | |||
2267 | int err; | 2267 | int err; |
2268 | 2268 | ||
2269 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); | 2269 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); |
2270 | if (nlh == NULL) | 2270 | if (!nlh) |
2271 | return -EMSGSIZE; | 2271 | return -EMSGSIZE; |
2272 | 2272 | ||
2273 | rtm = nlmsg_data(nlh); | 2273 | rtm = nlmsg_data(nlh); |
@@ -2332,7 +2332,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, | |||
2332 | 2332 | ||
2333 | skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif), | 2333 | skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif), |
2334 | GFP_ATOMIC); | 2334 | GFP_ATOMIC); |
2335 | if (skb == NULL) | 2335 | if (!skb) |
2336 | goto errout; | 2336 | goto errout; |
2337 | 2337 | ||
2338 | err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); | 2338 | err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); |
@@ -2447,7 +2447,7 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) | |||
2447 | struct mr_table *mrt; | 2447 | struct mr_table *mrt; |
2448 | 2448 | ||
2449 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | 2449 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); |
2450 | if (mrt == NULL) | 2450 | if (!mrt) |
2451 | return ERR_PTR(-ENOENT); | 2451 | return ERR_PTR(-ENOENT); |
2452 | 2452 | ||
2453 | iter->mrt = mrt; | 2453 | iter->mrt = mrt; |
@@ -2566,7 +2566,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) | |||
2566 | struct mr_table *mrt; | 2566 | struct mr_table *mrt; |
2567 | 2567 | ||
2568 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); | 2568 | mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); |
2569 | if (mrt == NULL) | 2569 | if (!mrt) |
2570 | return ERR_PTR(-ENOENT); | 2570 | return ERR_PTR(-ENOENT); |
2571 | 2571 | ||
2572 | it->mrt = mrt; | 2572 | it->mrt = mrt; |
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 344e7cdfb8d4..2dcd2e60df64 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -516,7 +516,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info) | |||
516 | ntohs(icmph->un.echo.sequence)); | 516 | ntohs(icmph->un.echo.sequence)); |
517 | 517 | ||
518 | sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); | 518 | sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); |
519 | if (sk == NULL) { | 519 | if (!sk) { |
520 | pr_debug("no socket, dropping\n"); | 520 | pr_debug("no socket, dropping\n"); |
521 | return; /* No socket for error */ | 521 | return; /* No socket for error */ |
522 | } | 522 | } |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 56946f47d446..46a78204189d 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -363,7 +363,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, | |||
363 | skb = sock_alloc_send_skb(sk, | 363 | skb = sock_alloc_send_skb(sk, |
364 | length + hlen + tlen + 15, | 364 | length + hlen + tlen + 15, |
365 | flags & MSG_DONTWAIT, &err); | 365 | flags & MSG_DONTWAIT, &err); |
366 | if (skb == NULL) | 366 | if (!skb) |
367 | goto error; | 367 | goto error; |
368 | skb_reserve(skb, hlen); | 368 | skb_reserve(skb, hlen); |
369 | 369 | ||
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 652b92ebd7ba..26a1cb348b3d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1056,7 +1056,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) | |||
1056 | __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); | 1056 | __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); |
1057 | 1057 | ||
1058 | rt = (struct rtable *)odst; | 1058 | rt = (struct rtable *)odst; |
1059 | if (odst->obsolete && odst->ops->check(odst, 0) == NULL) { | 1059 | if (odst->obsolete && !odst->ops->check(odst, 0)) { |
1060 | rt = ip_route_output_flow(sock_net(sk), &fl4, sk); | 1060 | rt = ip_route_output_flow(sock_net(sk), &fl4, sk); |
1061 | if (IS_ERR(rt)) | 1061 | if (IS_ERR(rt)) |
1062 | goto out; | 1062 | goto out; |
@@ -1450,7 +1450,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1450 | 1450 | ||
1451 | /* Primary sanity checks. */ | 1451 | /* Primary sanity checks. */ |
1452 | 1452 | ||
1453 | if (in_dev == NULL) | 1453 | if (!in_dev) |
1454 | return -EINVAL; | 1454 | return -EINVAL; |
1455 | 1455 | ||
1456 | if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || | 1456 | if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || |
@@ -1553,7 +1553,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1553 | 1553 | ||
1554 | /* get a working reference to the output device */ | 1554 | /* get a working reference to the output device */ |
1555 | out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); | 1555 | out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); |
1556 | if (out_dev == NULL) { | 1556 | if (!out_dev) { |
1557 | net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n"); | 1557 | net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n"); |
1558 | return -EINVAL; | 1558 | return -EINVAL; |
1559 | } | 1559 | } |
@@ -2054,7 +2054,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) | |||
2054 | ipv4_is_lbcast(fl4->daddr))) { | 2054 | ipv4_is_lbcast(fl4->daddr))) { |
2055 | /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ | 2055 | /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ |
2056 | dev_out = __ip_dev_find(net, fl4->saddr, false); | 2056 | dev_out = __ip_dev_find(net, fl4->saddr, false); |
2057 | if (dev_out == NULL) | 2057 | if (!dev_out) |
2058 | goto out; | 2058 | goto out; |
2059 | 2059 | ||
2060 | /* Special hack: user can direct multicasts | 2060 | /* Special hack: user can direct multicasts |
@@ -2087,7 +2087,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) | |||
2087 | if (fl4->flowi4_oif) { | 2087 | if (fl4->flowi4_oif) { |
2088 | dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif); | 2088 | dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif); |
2089 | rth = ERR_PTR(-ENODEV); | 2089 | rth = ERR_PTR(-ENODEV); |
2090 | if (dev_out == NULL) | 2090 | if (!dev_out) |
2091 | goto out; | 2091 | goto out; |
2092 | 2092 | ||
2093 | /* RACE: Check return value of inet_select_addr instead. */ | 2093 | /* RACE: Check return value of inet_select_addr instead. */ |
@@ -2299,7 +2299,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, | |||
2299 | u32 metrics[RTAX_MAX]; | 2299 | u32 metrics[RTAX_MAX]; |
2300 | 2300 | ||
2301 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); | 2301 | nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); |
2302 | if (nlh == NULL) | 2302 | if (!nlh) |
2303 | return -EMSGSIZE; | 2303 | return -EMSGSIZE; |
2304 | 2304 | ||
2305 | r = nlmsg_data(nlh); | 2305 | r = nlmsg_data(nlh); |
@@ -2421,7 +2421,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) | |||
2421 | rtm = nlmsg_data(nlh); | 2421 | rtm = nlmsg_data(nlh); |
2422 | 2422 | ||
2423 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 2423 | skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
2424 | if (skb == NULL) { | 2424 | if (!skb) { |
2425 | err = -ENOBUFS; | 2425 | err = -ENOBUFS; |
2426 | goto errout; | 2426 | goto errout; |
2427 | } | 2427 | } |
@@ -2452,7 +2452,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) | |||
2452 | struct net_device *dev; | 2452 | struct net_device *dev; |
2453 | 2453 | ||
2454 | dev = __dev_get_by_index(net, iif); | 2454 | dev = __dev_get_by_index(net, iif); |
2455 | if (dev == NULL) { | 2455 | if (!dev) { |
2456 | err = -ENODEV; | 2456 | err = -ENODEV; |
2457 | goto errout_free; | 2457 | goto errout_free; |
2458 | } | 2458 | } |
@@ -2651,7 +2651,7 @@ static __net_init int sysctl_route_net_init(struct net *net) | |||
2651 | tbl = ipv4_route_flush_table; | 2651 | tbl = ipv4_route_flush_table; |
2652 | if (!net_eq(net, &init_net)) { | 2652 | if (!net_eq(net, &init_net)) { |
2653 | tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); | 2653 | tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); |
2654 | if (tbl == NULL) | 2654 | if (!tbl) |
2655 | goto err_dup; | 2655 | goto err_dup; |
2656 | 2656 | ||
2657 | /* Don't export sysctls to unprivileged users */ | 2657 | /* Don't export sysctls to unprivileged users */ |
@@ -2661,7 +2661,7 @@ static __net_init int sysctl_route_net_init(struct net *net) | |||
2661 | tbl[0].extra1 = net; | 2661 | tbl[0].extra1 = net; |
2662 | 2662 | ||
2663 | net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); | 2663 | net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); |
2664 | if (net->ipv4.route_hdr == NULL) | 2664 | if (!net->ipv4.route_hdr) |
2665 | goto err_reg; | 2665 | goto err_reg; |
2666 | return 0; | 2666 | return 0; |
2667 | 2667 | ||
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index fdf899163d44..c3852a7ff3c7 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -909,7 +909,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) | |||
909 | int i; | 909 | int i; |
910 | 910 | ||
911 | table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL); | 911 | table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL); |
912 | if (table == NULL) | 912 | if (!table) |
913 | goto err_alloc; | 913 | goto err_alloc; |
914 | 914 | ||
915 | /* Update the variables to point into the current struct net */ | 915 | /* Update the variables to point into the current struct net */ |
@@ -918,7 +918,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) | |||
918 | } | 918 | } |
919 | 919 | ||
920 | net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); | 920 | net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); |
921 | if (net->ipv4.ipv4_hdr == NULL) | 921 | if (!net->ipv4.ipv4_hdr) |
922 | goto err_reg; | 922 | goto err_reg; |
923 | 923 | ||
924 | net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); | 924 | net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); |
@@ -956,7 +956,7 @@ static __init int sysctl_ipv4_init(void) | |||
956 | struct ctl_table_header *hdr; | 956 | struct ctl_table_header *hdr; |
957 | 957 | ||
958 | hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table); | 958 | hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table); |
959 | if (hdr == NULL) | 959 | if (!hdr) |
960 | return -ENOMEM; | 960 | return -ENOMEM; |
961 | 961 | ||
962 | if (register_pernet_subsys(&ipv4_sysctl_ops)) { | 962 | if (register_pernet_subsys(&ipv4_sysctl_ops)) { |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index dbd51cefaf02..5bd809bfd0aa 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1047,7 +1047,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, | |||
1047 | 1047 | ||
1048 | tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), | 1048 | tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), |
1049 | sk->sk_allocation); | 1049 | sk->sk_allocation); |
1050 | if (unlikely(tp->fastopen_req == NULL)) | 1050 | if (unlikely(!tp->fastopen_req)) |
1051 | return -ENOBUFS; | 1051 | return -ENOBUFS; |
1052 | tp->fastopen_req->data = msg; | 1052 | tp->fastopen_req->data = msg; |
1053 | tp->fastopen_req->size = size; | 1053 | tp->fastopen_req->size = size; |
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 2eb887ec0ce3..5da55e2b5cd2 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c | |||
@@ -141,7 +141,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, | |||
141 | req->sk = NULL; | 141 | req->sk = NULL; |
142 | 142 | ||
143 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); | 143 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); |
144 | if (child == NULL) | 144 | if (!child) |
145 | return false; | 145 | return false; |
146 | 146 | ||
147 | spin_lock(&queue->fastopenq->lock); | 147 | spin_lock(&queue->fastopenq->lock); |
@@ -214,7 +214,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, | |||
214 | sk->sk_data_ready(sk); | 214 | sk->sk_data_ready(sk); |
215 | bh_unlock_sock(child); | 215 | bh_unlock_sock(child); |
216 | sock_put(child); | 216 | sock_put(child); |
217 | WARN_ON(req->sk == NULL); | 217 | WARN_ON(!req->sk); |
218 | return true; | 218 | return true; |
219 | } | 219 | } |
220 | 220 | ||
@@ -233,7 +233,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk) | |||
233 | * temporarily vs a server not supporting Fast Open at all. | 233 | * temporarily vs a server not supporting Fast Open at all. |
234 | */ | 234 | */ |
235 | fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq; | 235 | fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq; |
236 | if (fastopenq == NULL || fastopenq->max_qlen == 0) | 236 | if (!fastopenq || fastopenq->max_qlen == 0) |
237 | return false; | 237 | return false; |
238 | 238 | ||
239 | if (fastopenq->qlen >= fastopenq->max_qlen) { | 239 | if (fastopenq->qlen >= fastopenq->max_qlen) { |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 18b80e8bc533..1fd283684303 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, | |||
866 | /* This must be called before lost_out is incremented */ | 866 | /* This must be called before lost_out is incremented */ |
867 | static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) | 867 | static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) |
868 | { | 868 | { |
869 | if ((tp->retransmit_skb_hint == NULL) || | 869 | if (!tp->retransmit_skb_hint || |
870 | before(TCP_SKB_CB(skb)->seq, | 870 | before(TCP_SKB_CB(skb)->seq, |
871 | TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) | 871 | TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) |
872 | tp->retransmit_skb_hint = skb; | 872 | tp->retransmit_skb_hint = skb; |
@@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, | |||
1614 | struct tcp_sacktag_state *state, | 1614 | struct tcp_sacktag_state *state, |
1615 | u32 skip_to_seq) | 1615 | u32 skip_to_seq) |
1616 | { | 1616 | { |
1617 | if (next_dup == NULL) | 1617 | if (!next_dup) |
1618 | return skb; | 1618 | return skb; |
1619 | 1619 | ||
1620 | if (before(next_dup->start_seq, skip_to_seq)) { | 1620 | if (before(next_dup->start_seq, skip_to_seq)) { |
@@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, | |||
1783 | if (tcp_highest_sack_seq(tp) == cache->end_seq) { | 1783 | if (tcp_highest_sack_seq(tp) == cache->end_seq) { |
1784 | /* ...but better entrypoint exists! */ | 1784 | /* ...but better entrypoint exists! */ |
1785 | skb = tcp_highest_sack(sk); | 1785 | skb = tcp_highest_sack(sk); |
1786 | if (skb == NULL) | 1786 | if (!skb) |
1787 | break; | 1787 | break; |
1788 | state.fack_count = tp->fackets_out; | 1788 | state.fack_count = tp->fackets_out; |
1789 | cache++; | 1789 | cache++; |
@@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, | |||
1798 | 1798 | ||
1799 | if (!before(start_seq, tcp_highest_sack_seq(tp))) { | 1799 | if (!before(start_seq, tcp_highest_sack_seq(tp))) { |
1800 | skb = tcp_highest_sack(sk); | 1800 | skb = tcp_highest_sack(sk); |
1801 | if (skb == NULL) | 1801 | if (!skb) |
1802 | break; | 1802 | break; |
1803 | state.fack_count = tp->fackets_out; | 1803 | state.fack_count = tp->fackets_out; |
1804 | } | 1804 | } |
@@ -3698,7 +3698,7 @@ void tcp_parse_options(const struct sk_buff *skb, | |||
3698 | */ | 3698 | */ |
3699 | if (opsize < TCPOLEN_EXP_FASTOPEN_BASE || | 3699 | if (opsize < TCPOLEN_EXP_FASTOPEN_BASE || |
3700 | get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC || | 3700 | get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC || |
3701 | foc == NULL || !th->syn || (opsize & 1)) | 3701 | !foc || !th->syn || (opsize & 1)) |
3702 | break; | 3702 | break; |
3703 | foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE; | 3703 | foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE; |
3704 | if (foc->len >= TCP_FASTOPEN_COOKIE_MIN && | 3704 | if (foc->len >= TCP_FASTOPEN_COOKIE_MIN && |
@@ -4669,7 +4669,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk) | |||
4669 | struct sk_buff *head; | 4669 | struct sk_buff *head; |
4670 | u32 start, end; | 4670 | u32 start, end; |
4671 | 4671 | ||
4672 | if (skb == NULL) | 4672 | if (!skb) |
4673 | return; | 4673 | return; |
4674 | 4674 | ||
4675 | start = TCP_SKB_CB(skb)->seq; | 4675 | start = TCP_SKB_CB(skb)->seq; |
@@ -5124,7 +5124,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
5124 | { | 5124 | { |
5125 | struct tcp_sock *tp = tcp_sk(sk); | 5125 | struct tcp_sock *tp = tcp_sk(sk); |
5126 | 5126 | ||
5127 | if (unlikely(sk->sk_rx_dst == NULL)) | 5127 | if (unlikely(!sk->sk_rx_dst)) |
5128 | inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); | 5128 | inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); |
5129 | /* | 5129 | /* |
5130 | * Header prediction. | 5130 | * Header prediction. |
@@ -5694,7 +5694,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5694 | WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && | 5694 | WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && |
5695 | sk->sk_state != TCP_FIN_WAIT1); | 5695 | sk->sk_state != TCP_FIN_WAIT1); |
5696 | 5696 | ||
5697 | if (tcp_check_req(sk, skb, req, true) == NULL) | 5697 | if (!tcp_check_req(sk, skb, req, true)) |
5698 | goto discard; | 5698 | goto discard; |
5699 | } | 5699 | } |
5700 | 5700 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 69f9cf684744..9ff311cf00f3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -122,7 +122,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) | |||
122 | and use initial timestamp retrieved from peer table. | 122 | and use initial timestamp retrieved from peer table. |
123 | */ | 123 | */ |
124 | if (tcptw->tw_ts_recent_stamp && | 124 | if (tcptw->tw_ts_recent_stamp && |
125 | (twp == NULL || (sysctl_tcp_tw_reuse && | 125 | (!twp || (sysctl_tcp_tw_reuse && |
126 | get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { | 126 | get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { |
127 | tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; | 127 | tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; |
128 | if (tp->write_seq == 0) | 128 | if (tp->write_seq == 0) |
@@ -494,7 +494,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
494 | /* Only in fast or simultaneous open. If a fast open socket is | 494 | /* Only in fast or simultaneous open. If a fast open socket is |
495 | * is already accepted it is treated as a connected one below. | 495 | * is already accepted it is treated as a connected one below. |
496 | */ | 496 | */ |
497 | if (fastopen && fastopen->sk == NULL) | 497 | if (fastopen && !fastopen->sk) |
498 | break; | 498 | break; |
499 | 499 | ||
500 | if (!sock_owned_by_user(sk)) { | 500 | if (!sock_owned_by_user(sk)) { |
@@ -1390,7 +1390,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1390 | sk_mark_napi_id(sk, skb); | 1390 | sk_mark_napi_id(sk, skb); |
1391 | if (dst) { | 1391 | if (dst) { |
1392 | if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || | 1392 | if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || |
1393 | dst->ops->check(dst, 0) == NULL) { | 1393 | !dst->ops->check(dst, 0)) { |
1394 | dst_release(dst); | 1394 | dst_release(dst); |
1395 | sk->sk_rx_dst = NULL; | 1395 | sk->sk_rx_dst = NULL; |
1396 | } | 1396 | } |
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 71ec14c87579..78ecc4a01712 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c | |||
@@ -505,7 +505,7 @@ void tcp_init_metrics(struct sock *sk) | |||
505 | struct tcp_metrics_block *tm; | 505 | struct tcp_metrics_block *tm; |
506 | u32 val, crtt = 0; /* cached RTT scaled by 8 */ | 506 | u32 val, crtt = 0; /* cached RTT scaled by 8 */ |
507 | 507 | ||
508 | if (dst == NULL) | 508 | if (!dst) |
509 | goto reset; | 509 | goto reset; |
510 | 510 | ||
511 | dst_confirm(dst); | 511 | dst_confirm(dst); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 274e96fb369b..f0db1599a09c 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -763,7 +763,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
763 | * socket is created, wait for troubles. | 763 | * socket is created, wait for troubles. |
764 | */ | 764 | */ |
765 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); | 765 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); |
766 | if (child == NULL) | 766 | if (!child) |
767 | goto listen_overflow; | 767 | goto listen_overflow; |
768 | 768 | ||
769 | inet_csk_reqsk_queue_unlink(sk, req); | 769 | inet_csk_reqsk_queue_unlink(sk, req); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2e69b8d16e68..bdc80734cd2c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -565,7 +565,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
565 | opts->mss = tcp_advertise_mss(sk); | 565 | opts->mss = tcp_advertise_mss(sk); |
566 | remaining -= TCPOLEN_MSS_ALIGNED; | 566 | remaining -= TCPOLEN_MSS_ALIGNED; |
567 | 567 | ||
568 | if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { | 568 | if (likely(sysctl_tcp_timestamps && !*md5)) { |
569 | opts->options |= OPTION_TS; | 569 | opts->options |= OPTION_TS; |
570 | opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; | 570 | opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; |
571 | opts->tsecr = tp->rx_opt.ts_recent; | 571 | opts->tsecr = tp->rx_opt.ts_recent; |
@@ -1148,7 +1148,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, | |||
1148 | 1148 | ||
1149 | /* Get a new skb... force flag on. */ | 1149 | /* Get a new skb... force flag on. */ |
1150 | buff = sk_stream_alloc_skb(sk, nsize, gfp); | 1150 | buff = sk_stream_alloc_skb(sk, nsize, gfp); |
1151 | if (buff == NULL) | 1151 | if (!buff) |
1152 | return -ENOMEM; /* We'll just try again later. */ | 1152 | return -ENOMEM; /* We'll just try again later. */ |
1153 | 1153 | ||
1154 | sk->sk_wmem_queued += buff->truesize; | 1154 | sk->sk_wmem_queued += buff->truesize; |
@@ -1707,7 +1707,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, | |||
1707 | return tcp_fragment(sk, skb, len, mss_now, gfp); | 1707 | return tcp_fragment(sk, skb, len, mss_now, gfp); |
1708 | 1708 | ||
1709 | buff = sk_stream_alloc_skb(sk, 0, gfp); | 1709 | buff = sk_stream_alloc_skb(sk, 0, gfp); |
1710 | if (unlikely(buff == NULL)) | 1710 | if (unlikely(!buff)) |
1711 | return -ENOMEM; | 1711 | return -ENOMEM; |
1712 | 1712 | ||
1713 | sk->sk_wmem_queued += buff->truesize; | 1713 | sk->sk_wmem_queued += buff->truesize; |
@@ -1925,7 +1925,8 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1925 | } | 1925 | } |
1926 | 1926 | ||
1927 | /* We're allowed to probe. Build it now. */ | 1927 | /* We're allowed to probe. Build it now. */ |
1928 | if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) | 1928 | nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC); |
1929 | if (!nskb) | ||
1929 | return -1; | 1930 | return -1; |
1930 | sk->sk_wmem_queued += nskb->truesize; | 1931 | sk->sk_wmem_queued += nskb->truesize; |
1931 | sk_mem_charge(sk, nskb->truesize); | 1932 | sk_mem_charge(sk, nskb->truesize); |
@@ -2733,7 +2734,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
2733 | if (skb == tcp_send_head(sk)) | 2734 | if (skb == tcp_send_head(sk)) |
2734 | break; | 2735 | break; |
2735 | /* we could do better than to assign each time */ | 2736 | /* we could do better than to assign each time */ |
2736 | if (hole == NULL) | 2737 | if (!hole) |
2737 | tp->retransmit_skb_hint = skb; | 2738 | tp->retransmit_skb_hint = skb; |
2738 | 2739 | ||
2739 | /* Assume this retransmit will generate | 2740 | /* Assume this retransmit will generate |
@@ -2765,7 +2766,7 @@ begin_fwd: | |||
2765 | goto begin_fwd; | 2766 | goto begin_fwd; |
2766 | 2767 | ||
2767 | } else if (!(sacked & TCPCB_LOST)) { | 2768 | } else if (!(sacked & TCPCB_LOST)) { |
2768 | if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) | 2769 | if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) |
2769 | hole = skb; | 2770 | hole = skb; |
2770 | continue; | 2771 | continue; |
2771 | 2772 | ||
@@ -2868,14 +2869,14 @@ int tcp_send_synack(struct sock *sk) | |||
2868 | struct sk_buff *skb; | 2869 | struct sk_buff *skb; |
2869 | 2870 | ||
2870 | skb = tcp_write_queue_head(sk); | 2871 | skb = tcp_write_queue_head(sk); |
2871 | if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { | 2872 | if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { |
2872 | pr_debug("%s: wrong queue state\n", __func__); | 2873 | pr_debug("%s: wrong queue state\n", __func__); |
2873 | return -EFAULT; | 2874 | return -EFAULT; |
2874 | } | 2875 | } |
2875 | if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { | 2876 | if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { |
2876 | if (skb_cloned(skb)) { | 2877 | if (skb_cloned(skb)) { |
2877 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); | 2878 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); |
2878 | if (nskb == NULL) | 2879 | if (!nskb) |
2879 | return -ENOMEM; | 2880 | return -ENOMEM; |
2880 | tcp_unlink_write_queue(skb, sk); | 2881 | tcp_unlink_write_queue(skb, sk); |
2881 | __skb_header_release(nskb); | 2882 | __skb_header_release(nskb); |
@@ -3300,7 +3301,7 @@ void tcp_send_ack(struct sock *sk) | |||
3300 | * sock. | 3301 | * sock. |
3301 | */ | 3302 | */ |
3302 | buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); | 3303 | buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); |
3303 | if (buff == NULL) { | 3304 | if (!buff) { |
3304 | inet_csk_schedule_ack(sk); | 3305 | inet_csk_schedule_ack(sk); |
3305 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; | 3306 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; |
3306 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | 3307 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, |
@@ -3344,7 +3345,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) | |||
3344 | 3345 | ||
3345 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ | 3346 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ |
3346 | skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); | 3347 | skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); |
3347 | if (skb == NULL) | 3348 | if (!skb) |
3348 | return -1; | 3349 | return -1; |
3349 | 3350 | ||
3350 | /* Reserve space for headers and set control bits. */ | 3351 | /* Reserve space for headers and set control bits. */ |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 294af16633af..9f525a2a68df 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -633,7 +633,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) | |||
633 | 633 | ||
634 | sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, | 634 | sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, |
635 | iph->saddr, uh->source, skb->dev->ifindex, udptable); | 635 | iph->saddr, uh->source, skb->dev->ifindex, udptable); |
636 | if (sk == NULL) { | 636 | if (!sk) { |
637 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); | 637 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); |
638 | return; /* No socket for error */ | 638 | return; /* No socket for error */ |
639 | } | 639 | } |
@@ -1011,7 +1011,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
1011 | if (connected) | 1011 | if (connected) |
1012 | rt = (struct rtable *)sk_dst_check(sk, 0); | 1012 | rt = (struct rtable *)sk_dst_check(sk, 0); |
1013 | 1013 | ||
1014 | if (rt == NULL) { | 1014 | if (!rt) { |
1015 | struct net *net = sock_net(sk); | 1015 | struct net *net = sock_net(sk); |
1016 | 1016 | ||
1017 | fl4 = &fl4_stack; | 1017 | fl4 = &fl4_stack; |
@@ -1619,7 +1619,7 @@ static void flush_stack(struct sock **stack, unsigned int count, | |||
1619 | 1619 | ||
1620 | for (i = 0; i < count; i++) { | 1620 | for (i = 0; i < count; i++) { |
1621 | sk = stack[i]; | 1621 | sk = stack[i]; |
1622 | if (likely(skb1 == NULL)) | 1622 | if (likely(!skb1)) |
1623 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); | 1623 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); |
1624 | 1624 | ||
1625 | if (!skb1) { | 1625 | if (!skb1) { |
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 2dbfc1f1f7b3..b763c39ae1d7 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c | |||
@@ -58,7 +58,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, | |||
58 | goto out_nosk; | 58 | goto out_nosk; |
59 | 59 | ||
60 | err = -ENOENT; | 60 | err = -ENOENT; |
61 | if (sk == NULL) | 61 | if (!sk) |
62 | goto out_nosk; | 62 | goto out_nosk; |
63 | 63 | ||
64 | err = sock_diag_check_cookie(sk, req->id.idiag_cookie); | 64 | err = sock_diag_check_cookie(sk, req->id.idiag_cookie); |
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index aac6197b7a71..cac7468db0a1 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c | |||
@@ -24,7 +24,7 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb) | |||
24 | 24 | ||
25 | static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) | 25 | static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) |
26 | { | 26 | { |
27 | if (skb_dst(skb) == NULL) { | 27 | if (!skb_dst(skb)) { |
28 | const struct iphdr *iph = ip_hdr(skb); | 28 | const struct iphdr *iph = ip_hdr(skb); |
29 | 29 | ||
30 | if (ip_route_input_noref(skb, iph->daddr, iph->saddr, | 30 | if (ip_route_input_noref(skb, iph->daddr, iph->saddr, |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index c224c856247b..bff69746e05f 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -298,7 +298,7 @@ static void __net_exit xfrm4_net_exit(struct net *net) | |||
298 | { | 298 | { |
299 | struct ctl_table *table; | 299 | struct ctl_table *table; |
300 | 300 | ||
301 | if (net->ipv4.xfrm4_hdr == NULL) | 301 | if (!net->ipv4.xfrm4_hdr) |
302 | return; | 302 | return; |
303 | 303 | ||
304 | table = net->ipv4.xfrm4_hdr->ctl_table_arg; | 304 | table = net->ipv4.xfrm4_hdr->ctl_table_arg; |