Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c              |  2
-rw-r--r--  net/core/neighbour.c        | 18
-rw-r--r--  net/ipv4/inet_hashtables.c  |  4
-rw-r--r--  net/ipv4/raw.c              |  2
-rw-r--r--  net/ipv4/route.c            | 10
-rw-r--r--  net/ipv4/tcp_ipv4.c         | 12
-rw-r--r--  net/ipv4/udp.c              | 10
-rw-r--r--  net/ipv6/addrconf.c         | 12
-rw-r--r--  net/ipv6/inet6_hashtables.c |  2
-rw-r--r--  net/ipv6/raw.c              |  2
-rw-r--r--  net/ipv6/udp.c              |  2
-rw-r--r--  net/netlink/af_netlink.c    |  8
-rw-r--r--  net/unix/af_unix.c          |  4
13 files changed, 44 insertions(+), 44 deletions(-)
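
Every hunk below applies the same mechanical substitution: a direct pointer comparison of two struct net namespace pointers (== or !=) becomes a call to net_eq() (or !net_eq()). For reference, here is a sketch of the helper's shape, assuming the usual definition in include/net/net_namespace.h around this kernel version; it is not part of this diff:

/*
 * Sketch only, not from this patch: with CONFIG_NET_NS enabled the
 * helper is a plain pointer comparison, and with namespaces compiled
 * out there is only init_net, so it collapses to a constant and the
 * per-namespace checks can be optimized away entirely.
 */
#ifdef CONFIG_NET_NS
static inline int net_eq(const struct net *net1, const struct net *net2)
{
	return net1 == net2;
}
#else
static inline int net_eq(const struct net *net1, const struct net *net2)
{
	return 1;
}
#endif

For example, the check in dev_change_net_namespace() below goes from "if (dev_net(dev) == net)" to "if (net_eq(dev_net(dev), net))"; behaviour is unchanged when CONFIG_NET_NS=y.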
diff --git a/net/core/dev.c b/net/core/dev.c
index 812534828914..75c3f7f4edd5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4136,7 +4136,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
 
 	/* Get out if there is nothing todo */
 	err = 0;
-	if (dev_net(dev) == net)
+	if (net_eq(dev_net(dev), net))
 		goto out;
 
 	/* Pick the destination device name, and ensure
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index de654ea8a944..857915a12c15 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -388,7 +388,7 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
 	hash_val = tbl->hash(pkey, NULL);
 	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
 		if (!memcmp(n->primary_key, pkey, key_len) &&
-		    dev_net(n->dev) == net) {
+		    net_eq(dev_net(n->dev), net)) {
 			neigh_hold(n);
 			NEIGH_CACHE_STAT_INC(tbl, hits);
 			break;
@@ -483,7 +483,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 
 	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
 		if (!memcmp(n->key, pkey, key_len) &&
-		    (pneigh_net(n) == net) &&
+		    net_eq(pneigh_net(n), net) &&
 		    (n->dev == dev || !n->dev)) {
 			read_unlock_bh(&tbl->lock);
 			goto out;
@@ -542,7 +542,7 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
 	     np = &n->next) {
 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
-		    (pneigh_net(n) == net)) {
+		    net_eq(pneigh_net(n), net)) {
 			*np = n->next;
 			write_unlock_bh(&tbl->lock);
 			if (tbl->pdestructor)
@@ -1286,7 +1286,7 @@ static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
 	struct neigh_parms *p;
 
 	for (p = &tbl->parms; p; p = p->next) {
-		if ((p->dev && p->dev->ifindex == ifindex && neigh_parms_net(p) == net) ||
+		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
 		    (!p->dev && !ifindex))
 			return p;
 	}
@@ -1964,7 +1964,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 			break;
 
 		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
-			if (net != neigh_parms_net(p))
+			if (!net_eq(neigh_parms_net(p), net))
 				continue;
 
 			if (nidx++ < neigh_skip)
@@ -2161,7 +2161,7 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
 		n = tbl->hash_buckets[bucket];
 
 		while (n) {
-			if (dev_net(n->dev) != net)
+			if (!net_eq(dev_net(n->dev), net))
 				goto next;
 			if (state->neigh_sub_iter) {
 				loff_t fakep = 0;
@@ -2204,7 +2204,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
 
 	while (1) {
 		while (n) {
-			if (dev_net(n->dev) != net)
+			if (!net_eq(dev_net(n->dev), net))
 				goto next;
 			if (state->neigh_sub_iter) {
 				void *v = state->neigh_sub_iter(state, n, pos);
@@ -2260,7 +2260,7 @@ static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
 		pn = tbl->phash_buckets[bucket];
-		while (pn && (pneigh_net(pn) != net))
+		while (pn && !net_eq(pneigh_net(pn), net))
 			pn = pn->next;
 		if (pn)
 			break;
@@ -2283,7 +2283,7 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
 		if (++state->bucket > PNEIGH_HASHMASK)
 			break;
 		pn = tbl->phash_buckets[state->bucket];
-		while (pn && (pneigh_net(pn) != net))
+		while (pn && !net_eq(pneigh_net(pn), net))
 			pn = pn->next;
 		if (pn)
 			break;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 1064111e5b96..1b6ff513c75d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -139,7 +139,7 @@ static struct sock *inet_lookup_listener_slow(struct net *net,
 	sk_for_each(sk, node, head) {
 		const struct inet_sock *inet = inet_sk(sk);
 
-		if (sock_net(sk) == net && inet->num == hnum &&
+		if (net_eq(sock_net(sk), net) && inet->num == hnum &&
 		    !ipv6_only_sock(sk)) {
 			const __be32 rcv_saddr = inet->rcv_saddr;
 			int score = sk->sk_family == PF_INET ? 1 : 0;
@@ -182,7 +182,7 @@ struct sock *__inet_lookup_listener(struct net *net,
 		if (inet->num == hnum && !sk->sk_node.next &&
 		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
 		    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
-		    !sk->sk_bound_dev_if && sock_net(sk) == net)
+		    !sk->sk_bound_dev_if && net_eq(sock_net(sk), net))
 			goto sherry_cache;
 		sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
 	}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 25dc8b38cac3..d965f0a39c84 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -117,7 +117,7 @@ static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
 	sk_for_each_from(sk, node) {
 		struct inet_sock *inet = inet_sk(sk);
 
-		if (sock_net(sk) == net && inet->num == num &&
+		if (net_eq(sock_net(sk), net) && inet->num == num &&
 		    !(inet->daddr && inet->daddr != raddr) &&
 		    !(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
 		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index eab8d75e5222..230716c2dfe0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1196,7 +1196,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				    rth->fl.oif != ikeys[k] ||
 				    rth->fl.iif != 0 ||
 				    rth->rt_genid != atomic_read(&rt_genid) ||
-				    dev_net(rth->u.dst.dev) != net) {
+				    !net_eq(dev_net(rth->u.dst.dev), net)) {
 					rthp = &rth->u.dst.rt_next;
 					continue;
 				}
@@ -1455,7 +1455,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 			    rth->rt_src == iph->saddr &&
 			    rth->fl.iif == 0 &&
 			    !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
-			    dev_net(rth->u.dst.dev) == net &&
+			    net_eq(dev_net(rth->u.dst.dev), net) &&
 			    rth->rt_genid == atomic_read(&rt_genid)) {
 				unsigned short mtu = new_mtu;
 
@@ -2085,7 +2085,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		    rth->fl.oif == 0 &&
 		    rth->fl.mark == skb->mark &&
 		    rth->fl.fl4_tos == tos &&
-		    dev_net(rth->u.dst.dev) == net &&
+		    net_eq(dev_net(rth->u.dst.dev), net) &&
 		    rth->rt_genid == atomic_read(&rt_genid)) {
 			dst_use(&rth->u.dst, jiffies);
 			RT_CACHE_STAT_INC(in_hit);
@@ -2487,7 +2487,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
 		    rth->fl.mark == flp->mark &&
 		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
 		      (IPTOS_RT_MASK | RTO_ONLINK)) &&
-		    dev_net(rth->u.dst.dev) == net &&
+		    net_eq(dev_net(rth->u.dst.dev), net) &&
 		    rth->rt_genid == atomic_read(&rt_genid)) {
 			dst_use(&rth->u.dst, jiffies);
 			RT_CACHE_STAT_INC(out_hit);
@@ -2796,7 +2796,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
 		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
-			if (dev_net(rt->u.dst.dev) != net || idx < s_idx)
+			if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
 				continue;
 			if (rt->rt_genid != atomic_read(&rt_genid))
 				continue;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 46847e600a46..2a5881c81778 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1974,7 +1974,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 		while (1) {
 			while (req) {
 				if (req->rsk_ops->family == st->family &&
-				    sock_net(req->sk) == net) {
+				    net_eq(sock_net(req->sk), net)) {
 					cur = req;
 					goto out;
 				}
@@ -1998,7 +1998,7 @@ get_req:
 	}
 get_sk:
 	sk_for_each_from(sk, node) {
-		if (sk->sk_family == st->family && sock_net(sk) == net) {
+		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
 			cur = sk;
 			goto out;
 		}
@@ -2049,7 +2049,7 @@ static void *established_get_first(struct seq_file *seq)
 		read_lock_bh(lock);
 		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
 			if (sk->sk_family != st->family ||
-			    sock_net(sk) != net) {
+			    !net_eq(sock_net(sk), net)) {
 				continue;
 			}
 			rc = sk;
@@ -2059,7 +2059,7 @@ static void *established_get_first(struct seq_file *seq)
 		inet_twsk_for_each(tw, node,
 				   &tcp_hashinfo.ehash[st->bucket].twchain) {
 			if (tw->tw_family != st->family ||
-			    twsk_net(tw) != net) {
+			    !net_eq(twsk_net(tw), net)) {
 				continue;
 			}
 			rc = tw;
@@ -2086,7 +2086,7 @@ static void *established_get_next(struct seq_file *seq, void *cur)
 		tw = cur;
 		tw = tw_next(tw);
 get_tw:
-		while (tw && (tw->tw_family != st->family || twsk_net(tw) != net)) {
+		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
 			tw = tw_next(tw);
 		}
 		if (tw) {
@@ -2107,7 +2107,7 @@ get_tw:
 		sk = sk_next(sk);
 
 	sk_for_each_from(sk, node) {
-		if (sk->sk_family == st->family && sock_net(sk) == net)
+		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
 			goto found;
 	}
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 76d52d37d6ac..80007c79f12f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -137,7 +137,7 @@ static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
 	struct hlist_node *node;
 
 	sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
-		if (sock_net(sk) == net && sk->sk_hash == num)
+		if (net_eq(sock_net(sk), net) && sk->sk_hash == num)
 			return 1;
 	return 0;
 }
@@ -218,7 +218,7 @@ gotit:
 		sk_for_each(sk2, node, head)
 			if (sk2->sk_hash == snum &&
 			    sk2 != sk &&
-			    sock_net(sk2) == net &&
+			    net_eq(sock_net(sk2), net) &&
 			    (!sk2->sk_reuse || !sk->sk_reuse) &&
 			    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
 			     || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
@@ -269,7 +269,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
 		struct inet_sock *inet = inet_sk(sk);
 
-		if (sock_net(sk) == net && sk->sk_hash == hnum &&
+		if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
 		    !ipv6_only_sock(sk)) {
 			int score = (sk->sk_family == PF_INET ? 1 : 0);
 			if (inet->rcv_saddr) {
@@ -1511,7 +1511,7 @@ static struct sock *udp_get_first(struct seq_file *seq)
 	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
 		struct hlist_node *node;
 		sk_for_each(sk, node, state->hashtable + state->bucket) {
-			if (sock_net(sk) != net)
+			if (!net_eq(sock_net(sk), net))
 				continue;
 			if (sk->sk_family == state->family)
 				goto found;
@@ -1531,7 +1531,7 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
 		sk = sk_next(sk);
 try_again:
 		;
-	} while (sk && (sock_net(sk) != net || sk->sk_family != state->family));
+	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
 
 	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
 		sk = sk_head(state->hashtable + state->bucket);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ac5d4f4b6312..5ab9973571ef 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1217,7 +1217,7 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
 
 	read_lock_bh(&addrconf_hash_lock);
 	for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
-		if (dev_net(ifp->idev->dev) != net)
+		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr) &&
 		    !(ifp->flags&IFA_F_TENTATIVE)) {
@@ -1239,7 +1239,7 @@ int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 	u8 hash = ipv6_addr_hash(addr);
 
 	for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
-		if (dev_net(ifp->idev->dev) != net)
+		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr)) {
 			if (dev == NULL || ifp->idev->dev == dev)
@@ -1257,7 +1257,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, struct in6_addr *addr,
 
 	read_lock_bh(&addrconf_hash_lock);
 	for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
-		if (dev_net(ifp->idev->dev) != net)
+		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_equal(&ifp->addr, addr)) {
 			if (dev == NULL || ifp->idev->dev == dev ||
@@ -2771,7 +2771,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
 	for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
 		ifa = inet6_addr_lst[state->bucket];
 
-		while (ifa && dev_net(ifa->idev->dev) != net)
+		while (ifa && !net_eq(dev_net(ifa->idev->dev), net))
 			ifa = ifa->lst_next;
 		if (ifa)
 			break;
@@ -2787,7 +2787,7 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifad
 	ifa = ifa->lst_next;
 try_again:
 	if (ifa) {
-		if (dev_net(ifa->idev->dev) != net) {
+		if (!net_eq(dev_net(ifa->idev->dev), net)) {
 			ifa = ifa->lst_next;
 			goto try_again;
 		}
@@ -2905,7 +2905,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
 	u8 hash = ipv6_addr_hash(addr);
 	read_lock_bh(&addrconf_hash_lock);
 	for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) {
-		if (dev_net(ifp->idev->dev) != net)
+		if (!net_eq(dev_net(ifp->idev->dev), net))
 			continue;
 		if (ipv6_addr_cmp(&ifp->addr, addr) == 0 &&
 		    (ifp->flags & IFA_F_HOMEADDRESS)) {
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 21c467675412..340c7d42b83a 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -105,7 +105,7 @@ struct sock *inet6_lookup_listener(struct net *net,
 
 	read_lock(&hashinfo->lhash_lock);
 	sk_for_each(sk, node, &hashinfo->listening_hash[inet_lhashfn(hnum)]) {
-		if (sock_net(sk) == net && inet_sk(sk)->num == hnum &&
+		if (net_eq(sock_net(sk), net) && inet_sk(sk)->num == hnum &&
 		    sk->sk_family == PF_INET6) {
 			const struct ipv6_pinfo *np = inet6_sk(sk);
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 12c7a1560977..830da4603697 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -76,7 +76,7 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
 		if (inet_sk(sk)->num == num) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
 
-			if (sock_net(sk) != net)
+			if (!net_eq(sock_net(sk), net))
 				continue;
 
 			if (!ipv6_addr_any(&np->daddr) &&
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index db266ff297e5..aacbc82ecf0f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -70,7 +70,7 @@ static struct sock *__udp6_lib_lookup(struct net *net,
 	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
 		struct inet_sock *inet = inet_sk(sk);
 
-		if (sock_net(sk) == net && sk->sk_hash == hnum &&
+		if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
 		    sk->sk_family == PF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
 			int score = 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1d16d95dfaaf..36f75d873898 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -228,7 +228,7 @@ static inline struct sock *netlink_lookup(struct net *net, int protocol,
 	read_lock(&nl_table_lock);
 	head = nl_pid_hashfn(hash, pid);
 	sk_for_each(sk, node, head) {
-		if (sock_net(sk) == net && (nlk_sk(sk)->pid == pid)) {
+		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
 			sock_hold(sk);
 			goto found;
 		}
@@ -348,7 +348,7 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
 	head = nl_pid_hashfn(hash, pid);
 	len = 0;
 	sk_for_each(osk, node, head) {
-		if (sock_net(osk) == net && (nlk_sk(osk)->pid == pid))
+		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
 			break;
 		len++;
 	}
@@ -532,7 +532,7 @@ retry:
 	netlink_table_grab();
 	head = nl_pid_hashfn(hash, pid);
 	sk_for_each(osk, node, head) {
-		if (sock_net(osk) != net)
+		if (!net_eq(sock_net(osk), net))
 			continue;
 		if (nlk_sk(osk)->pid == pid) {
 			/* Bind collision, search negative pid values. */
@@ -962,7 +962,7 @@ static inline int do_one_broadcast(struct sock *sk,
 	    !test_bit(p->group - 1, nlk->groups))
 		goto out;
 
-	if (sock_net(sk) != p->net)
+	if (!net_eq(sock_net(sk), p->net))
 		goto out;
 
 	if (p->failure) {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 4a4793051bcb..50bbf6bb1a22 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -252,7 +252,7 @@ static struct sock *__unix_find_socket_byname(struct net *net,
 	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
 		struct unix_sock *u = unix_sk(s);
 
-		if (sock_net(s) != net)
+		if (!net_eq(sock_net(s), net))
 			continue;
 
 		if (u->addr->len == len &&
@@ -289,7 +289,7 @@ static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
 		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
 		struct dentry *dentry = unix_sk(s)->dentry;
 
-		if (sock_net(s) != net)
+		if (!net_eq(sock_net(s), net))
 			continue;
 
 		if(dentry && dentry->d_inode == i)