-rw-r--r--  net/ipv4/af_inet.c                2
-rw-r--r--  net/ipv4/arp.c                    2
-rw-r--r--  net/ipv4/cipso_ipv4.c             6
-rw-r--r--  net/ipv4/devinet.c                2
-rw-r--r--  net/ipv4/fib_trie.c               4
-rw-r--r--  net/ipv4/geneve.c                 2
-rw-r--r--  net/ipv4/gre_offload.c            2
-rw-r--r--  net/ipv4/igmp.c                   8
-rw-r--r--  net/ipv4/inet_connection_sock.c   8
-rw-r--r--  net/ipv4/inet_hashtables.c        2
-rw-r--r--  net/ipv4/inet_timewait_sock.c     2
-rw-r--r--  net/ipv4/ip_fragment.c            3
-rw-r--r--  net/ipv4/ip_input.c               2
-rw-r--r--  net/ipv4/ip_options.c             2
-rw-r--r--  net/ipv4/ip_output.c              6
-rw-r--r--  net/ipv4/ip_sockglue.c            2
-rw-r--r--  net/ipv4/ip_tunnel.c              2
-rw-r--r--  net/ipv4/ip_vti.c                 2
-rw-r--r--  net/ipv4/ipmr.c                   2
-rw-r--r--  net/ipv4/ping.c                   2
-rw-r--r--  net/ipv4/raw.c                    4
-rw-r--r--  net/ipv4/route.c                  2
-rw-r--r--  net/ipv4/tcp.c                   12
-rw-r--r--  net/ipv4/tcp_diag.c               2
-rw-r--r--  net/ipv4/tcp_input.c             12
-rw-r--r--  net/ipv4/tcp_ipv4.c               4
-rw-r--r--  net/ipv4/tcp_minisocks.c          6
-rw-r--r--  net/ipv4/tcp_output.c            14
-rw-r--r--  net/ipv4/udp.c                    4
-rw-r--r--  net/ipv4/udp_offload.c            4
30 files changed, 64 insertions, 63 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7d3b00c01bc8..8b47a4d79d04 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1269,7 +1269,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 		if (udpfrag) {
 			iph->id = htons(id);
 			iph->frag_off = htons(offset >> 3);
-			if (skb->next != NULL)
+			if (skb->next)
 				iph->frag_off |= htons(IP_MF);
 			offset += skb->len - nhoff - ihl;
 		} else {
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index ffe84226a2c8..c6e67aa46c32 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -569,7 +569,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 		break;
 #endif
 	default:
-		if (target_hw != NULL)
+		if (target_hw)
 			memcpy(arp_ptr, target_hw, dev->addr_len);
 		else
 			memset(arp_ptr, 0, dev->addr_len);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 1b28e1183c1b..bdb2a07ec363 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -502,7 +502,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
 	atomic_set(&doi_def->refcount, 1);
 
 	spin_lock(&cipso_v4_doi_list_lock);
-	if (cipso_v4_doi_search(doi_def->doi) != NULL) {
+	if (cipso_v4_doi_search(doi_def->doi)) {
 		spin_unlock(&cipso_v4_doi_list_lock);
 		ret_val = -EEXIST;
 		goto doi_add_return;
@@ -513,7 +513,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
 
 doi_add_return:
 	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
-	if (audit_buf != NULL) {
+	if (audit_buf) {
 		const char *type_str;
 		switch (doi_type) {
 		case CIPSO_V4_MAP_TRANS:
@@ -617,7 +617,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
 
 doi_remove_return:
 	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
-	if (audit_buf != NULL) {
+	if (audit_buf) {
 		audit_log_format(audit_buf,
 				 " cipso_doi=%u res=%u",
 				 doi, ret_val == 0 ? 1 : 0);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 0ee21689d37e..419d23c53ec7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1290,7 +1290,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
 	__be32 addr = 0;
 	struct net_device *dev;
 
-	if (in_dev != NULL)
+	if (in_dev)
 		return confirm_addr_indev(in_dev, dst, local, scope);
 
 	rcu_read_lock();
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 9e4a3e3423b4..e13fcc602da2 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -391,9 +391,9 @@ static void put_child(struct key_vector *tn, unsigned long i,
 	BUG_ON(i >= child_length(tn));
 
 	/* update emptyChildren, overflow into fullChildren */
-	if (!n && chi != NULL)
+	if (!n && chi)
 		empty_child_inc(tn);
-	if (n != NULL && !chi)
+	if (n && !chi)
 		empty_child_dec(tn);
 
 	/* update fullChildren */
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index a7d8be3dd3de..e64f8e9785d1 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -230,7 +230,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
 
 	rcu_read_lock();
 	ptype = gro_find_complete_by_type(type);
-	if (ptype != NULL)
+	if (ptype)
 		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
 	rcu_read_unlock();
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 9358f11aae40..5aa46d4b44ef 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -243,7 +243,7 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
 
 	rcu_read_lock();
 	ptype = gro_find_complete_by_type(type);
-	if (ptype != NULL)
+	if (ptype)
 		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
 
 	rcu_read_unlock();
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 27d204b834f9..a3a697f5ffba 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2370,7 +2370,7 @@ void ip_mc_drop_socket(struct sock *sk)
 		inet->mc_list = iml->next_rcu;
 		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
 		(void) ip_mc_leave_src(sk, iml, in_dev);
-		if (in_dev != NULL)
+		if (in_dev)
 			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
 		/* decrease mem now to avoid the memleak warning */
 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
@@ -2590,10 +2590,10 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
 		if (unlikely(!idev))
 			continue;
 		im = rcu_dereference(idev->mc_list);
-		if (likely(im != NULL)) {
+		if (likely(im)) {
 			spin_lock_bh(&im->lock);
 			psf = im->sources;
-			if (likely(psf != NULL)) {
+			if (likely(psf)) {
 				state->im = im;
 				state->idev = idev;
 				break;
@@ -2663,7 +2663,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
 	__releases(rcu)
 {
 	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
-	if (likely(state->im != NULL)) {
+	if (likely(state->im)) {
 		spin_unlock_bh(&state->im->lock);
 		state->im = NULL;
 	}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 79c0c9439fdc..5c3dd6267ed3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -673,7 +673,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 {
 	struct sock *newsk = sk_clone_lock(sk, priority);
 
-	if (newsk != NULL) {
+	if (newsk) {
 		struct inet_connection_sock *newicsk = inet_csk(newsk);
 
 		newsk->sk_state = TCP_SYN_RECV;
@@ -843,7 +843,7 @@ void inet_csk_listen_stop(struct sock *sk)
 		sk_acceptq_removed(sk);
 		reqsk_put(req);
 	}
-	if (queue->fastopenq != NULL) {
+	if (queue->fastopenq) {
 		/* Free all the reqs queued in rskq_rst_head. */
 		spin_lock_bh(&queue->fastopenq->lock);
 		acc_req = queue->fastopenq->rskq_rst_head;
@@ -875,7 +875,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
-	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
+	if (icsk->icsk_af_ops->compat_getsockopt)
 		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
 							    optval, optlen);
 	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
@@ -888,7 +888,7 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
-	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
+	if (icsk->icsk_af_ops->compat_setsockopt)
 		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
 							    optval, optlen);
 	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0fb841b9d834..d4630bf2d9aa 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -64,7 +64,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 {
 	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
-	if (tb != NULL) {
+	if (tb) {
 		write_pnet(&tb->ib_net, net);
 		tb->port = snum;
 		tb->fastreuse = 0;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index f38e387448fb..118f0f195820 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -173,7 +173,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 	struct inet_timewait_sock *tw =
 		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
 				 GFP_ATOMIC);
-	if (tw != NULL) {
+	if (tw) {
 		const struct inet_sock *inet = inet_sk(sk);
 
 		kmemcheck_annotate_bitfield(tw, flags);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 5a6cf8667a9d..cc1da6d9cb35 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -639,7 +639,8 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
 	/* Lookup (or create) queue header */
-	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
+	qp = ip_find(net, ip_hdr(skb), user);
+	if (qp) {
 		int ret;
 
 		spin_lock(&qp->q.lock);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 00bed6fe3b66..2e0410ed8f16 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -203,7 +203,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
 		raw = raw_local_deliver(skb, protocol);
 
 		ipprot = rcu_dereference(inet_protos[protocol]);
-		if (ipprot != NULL) {
+		if (ipprot) {
 			int ret;
 
 			if (!ipprot->no_policy) {
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5b3d91be2db0..bd246792360b 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -264,7 +264,7 @@ int ip_options_compile(struct net *net,
 	unsigned char *iph;
 	int optlen, l;
 
-	if (skb != NULL) {
+	if (skb) {
 		rt = skb_rtable(skb);
 		optptr = (unsigned char *)&(ip_hdr(skb)[1]);
 	} else
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 561d67b2ac74..26f6f7956168 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -257,7 +257,7 @@ static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 	/* Policy lookup after SNAT yielded a new policy */
-	if (skb_dst(skb)->xfrm != NULL) {
+	if (skb_dst(skb)->xfrm) {
 		IPCB(skb)->flags |= IPSKB_REROUTED;
 		return dst_output(skb);
 	}
@@ -376,7 +376,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
 	inet_opt = rcu_dereference(inet->inet_opt);
 	fl4 = &fl->u.ip4;
 	rt = skb_rtable(skb);
-	if (rt != NULL)
+	if (rt)
 		goto packet_routed;
 
 	/* Make sure we can route this packet. */
@@ -587,7 +587,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 					ip_options_fragment(frag);
 				offset += skb->len - hlen;
 				iph->frag_off = htons(offset>>3);
-				if (frag->next != NULL)
+				if (frag->next)
 					iph->frag_off |= htons(IP_MF);
 				/* Ready, complete checksum */
 				ip_send_check(iph);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f64b1b24c64f..7cfb0893f263 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -387,7 +387,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
 				   skb_network_header(skb);
 	serr->port = port;
 
-	if (skb_pull(skb, payload - skb->data) != NULL) {
+	if (skb_pull(skb, payload - skb->data)) {
 		skb_reset_transport_header(skb);
 		if (sock_queue_err_skb(sk, skb) == 0)
 			return;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 31eaa9ba1803..6d364ab8e14e 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -876,7 +876,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 			break;
 		}
 		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
-			if (t != NULL) {
+			if (t) {
 				if (t->dev != dev) {
 					err = -EEXIST;
 					break;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index c4f93c0d1104..9f7269f3c54a 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -60,7 +60,7 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
 
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
 				  iph->saddr, iph->daddr, 0);
-	if (tunnel != NULL) {
+	if (tunnel) {
 		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 			goto drop;
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a170e4bc9006..c204b728bbc1 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -316,7 +316,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 	unsigned int i;
 
 	mrt = ipmr_get_table(net, id);
-	if (mrt != NULL)
+	if (mrt)
 		return mrt;
 
 	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2dcd2e60df64..a93f260cf24c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -971,7 +971,7 @@ bool ping_rcv(struct sk_buff *skb)
 	skb_push(skb, skb->data - (u8 *)icmph);
 
 	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
-	if (sk != NULL) {
+	if (sk) {
 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
 		pr_debug("rcv on socket %p\n", sk);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 46a78204189d..6d0fa8fb8af0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -293,7 +293,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
 
 	read_lock(&raw_v4_hashinfo.lock);
 	raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
-	if (raw_sk != NULL) {
+	if (raw_sk) {
 		iph = (const struct iphdr *)skb->data;
 		net = dev_net(skb->dev);
 
@@ -872,7 +872,7 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
 		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
-		if (skb != NULL)
+		if (skb)
 			amount = skb->len;
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 26a1cb348b3d..a78540f28276 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1591,7 +1591,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
 	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
 	if (do_cache) {
-		if (fnhe != NULL)
+		if (fnhe)
 			rth = rcu_dereference(fnhe->fnhe_rth_input);
 		else
 			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5bd809bfd0aa..094a6822c71d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -496,7 +496,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
 	/* Connected or passive Fast Open socket? */
 	if (sk->sk_state != TCP_SYN_SENT &&
-	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
+	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
 		int target = sock_rcvlowat(sk, 0, INT_MAX);
 
 		if (tp->urg_seq == tp->copied_seq &&
@@ -1028,7 +1028,7 @@ static inline int select_size(const struct sock *sk, bool sg)
 
 void tcp_free_fastopen_req(struct tcp_sock *tp)
 {
-	if (tp->fastopen_req != NULL) {
+	if (tp->fastopen_req) {
 		kfree(tp->fastopen_req);
 		tp->fastopen_req = NULL;
 	}
@@ -1042,7 +1042,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 
 	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
 		return -EOPNOTSUPP;
-	if (tp->fastopen_req != NULL)
+	if (tp->fastopen_req)
 		return -EALREADY; /* Another Fast Open is in progress */
 
 	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
@@ -2138,7 +2138,7 @@ adjudge_to_death:
 		 * aborted (e.g., closed with unread data) before 3WHS
 		 * finishes.
 		 */
-		if (req != NULL)
+		if (req)
 			reqsk_fastopen_remove(sk, req, false);
 		inet_csk_destroy_sock(sk);
 	}
@@ -2776,7 +2776,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_FASTOPEN:
-		if (icsk->icsk_accept_queue.fastopenq != NULL)
+		if (icsk->icsk_accept_queue.fastopenq)
 			val = icsk->icsk_accept_queue.fastopenq->max_qlen;
 		else
 			val = 0;
@@ -2960,7 +2960,7 @@ void tcp_done(struct sock *sk)
 
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
-	if (req != NULL)
+	if (req)
 		reqsk_fastopen_remove(sk, req, false);
 
 	sk->sk_shutdown = SHUTDOWN_MASK;
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 86dc119a3815..79b34a0f4a4a 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -29,7 +29,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 		r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 		r->idiag_wqueue = tp->write_seq - tp->snd_una;
 	}
-	if (info != NULL)
+	if (info)
 		tcp_get_info(sk, info);
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1fd283684303..df7e7fa12733 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 	fack_count += pcount;
 
 	/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
-	if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+	if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
 	    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
 		tp->lost_cnt_hint += pcount;
 
@@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
 			break;
 
-		if ((next_dup != NULL) &&
+		if (next_dup &&
 		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
 			in_sack = tcp_match_skb_to_sack(sk, skb,
 							next_dup->start_seq,
@@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 		if (in_sack <= 0) {
 			tmp = tcp_shift_skb_data(sk, skb, state,
 						 start_seq, end_seq, dup_sack);
-			if (tmp != NULL) {
+			if (tmp) {
 				if (tmp != skb) {
 					skb = tmp;
 					continue;
@@ -5321,7 +5321,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
 	tcp_set_state(sk, TCP_ESTABLISHED);
 
-	if (skb != NULL) {
+	if (skb) {
 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
 	}
@@ -5690,7 +5690,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	}
 
 	req = tp->fastopen_rsk;
-	if (req != NULL) {
+	if (req) {
 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
 		    sk->sk_state != TCP_FIN_WAIT1);
 
@@ -5780,7 +5780,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		 * ACK we have received, this would have acknowledged
 		 * our SYNACK so stop the SYNACK timer.
 		 */
-		if (req != NULL) {
+		if (req) {
 			/* Return RST if ack_seq is invalid.
 			 * Note that RFC793 only says to generate a
 			 * DUPACK for it but for TCP Fast Open it seems
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9ff311cf00f3..560f9571f7c4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1305,7 +1305,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	/* Copy over the MD5 key from the original socket */
 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
 				AF_INET);
-	if (key != NULL) {
+	if (key) {
 		/*
 		 * We're using one, so create a matching key
 		 * on the newsk structure. If we fail to get
@@ -1797,7 +1797,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 	if (inet_csk(sk)->icsk_bind_hash)
 		inet_put_port(sk);
 
-	BUG_ON(tp->fastopen_rsk != NULL);
+	BUG_ON(tp->fastopen_rsk);
 
 	/* If socket is aborted during connect operation */
 	tcp_free_fastopen_req(tp);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f0db1599a09c..d7003911c894 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -294,7 +294,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
 		tw = inet_twsk_alloc(sk, state);
 
-	if (tw != NULL) {
+	if (tw) {
 		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 		struct inet_sock *inet = inet_sk(sk);
@@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		struct tcp_md5sig_key *key;
 		tcptw->tw_md5_key = NULL;
 		key = tp->af_specific->md5_lookup(sk, sk);
-		if (key != NULL) {
+		if (key) {
 			tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
 			if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
 				BUG();
@@ -454,7 +454,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 {
 	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
-	if (newsk != NULL) {
+	if (newsk) {
 		const struct inet_request_sock *ireq = inet_rsk(req);
 		struct tcp_request_sock *treq = tcp_rsk(req);
 		struct inet_connection_sock *newicsk = inet_csk(newsk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bdc80734cd2c..7404e5238e00 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -641,7 +641,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
 		if (unlikely(!ireq->tstamp_ok))
 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 	}
-	if (foc != NULL && foc->len >= 0) {
+	if (foc && foc->len >= 0) {
 		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
 		need = (need + 3) & ~3U; /* Align to 32 bits */
 		if (remaining >= need) {
@@ -2224,7 +2224,7 @@ void tcp_send_loss_probe(struct sock *sk)
 	int mss = tcp_current_mss(sk);
 	int err = -1;
 
-	if (tcp_send_head(sk) != NULL) {
+	if (tcp_send_head(sk)) {
 		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
 		goto rearm_timer;
 	}
@@ -2758,7 +2758,7 @@ begin_fwd:
 			if (!tcp_can_forward_retransmit(sk))
 				break;
 			/* Backtrack if necessary to non-L'ed skb */
-			if (hole != NULL) {
+			if (hole) {
 				skb = hole;
 				hole = NULL;
 			}
@@ -2811,7 +2811,7 @@ void tcp_send_fin(struct sock *sk)
 	 */
 	mss_now = tcp_current_mss(sk);
 
-	if (tcp_send_head(sk) != NULL) {
+	if (tcp_send_head(sk)) {
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
 		TCP_SKB_CB(skb)->end_seq++;
 		tp->write_seq++;
@@ -3015,7 +3015,7 @@ static void tcp_connect_init(struct sock *sk)
 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
 
 #ifdef CONFIG_TCP_MD5SIG
-	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+	if (tp->af_specific->md5_lookup(sk, sk))
 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
@@ -3376,8 +3376,8 @@ int tcp_write_wakeup(struct sock *sk)
 	if (sk->sk_state == TCP_CLOSE)
 		return -1;
 
-	if ((skb = tcp_send_head(sk)) != NULL &&
-	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
+	skb = tcp_send_head(sk);
+	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
 		int err;
 		unsigned int mss = tcp_current_mss(sk);
 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9f525a2a68df..2162fc6ce1c1 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1522,7 +1522,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 		/* if we're overly short, let UDP handle it */
 		encap_rcv = ACCESS_ONCE(up->encap_rcv);
-		if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+		if (skb->len > sizeof(struct udphdr) && encap_rcv) {
 			int ret;
 
 			/* Verify checksum before giving to encap */
@@ -1802,7 +1802,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 						saddr, daddr, udptable, proto);
 
 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-	if (sk != NULL) {
+	if (sk) {
 		int ret;
 
 		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 4915d8284a86..f9386160cbee 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -285,7 +285,7 @@ void udp_del_offload(struct udp_offload *uo)
 	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
 unlock:
 	spin_unlock(&udp_offload_lock);
-	if (uo_priv != NULL)
+	if (uo_priv)
 		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
 }
 EXPORT_SYMBOL(udp_del_offload);
@@ -394,7 +394,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
 			break;
 	}
 
-	if (uo_priv != NULL) {
+	if (uo_priv) {
 		NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
 		err = uo_priv->offload->callbacks.gro_complete(skb,
 				nhoff + sizeof(struct udphdr),