 include/linux/rculist.h | 17
 include/net/sock.h      | 57
 include/net/udp.h       |  2
 net/ipv4/udp.c          | 47
 net/ipv6/udp.c          | 26
 5 files changed, 83 insertions(+), 66 deletions(-)
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 3ba2998b22b..e649bd3f2c9 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -383,22 +383,5 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 	({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });	\
 	pos = rcu_dereference(pos->next))
 
-/**
- * hlist_for_each_entry_rcu_safenext - iterate over rcu list of given type
- * @tpos:	the type * to use as a loop cursor.
- * @pos:	the &struct hlist_node to use as a loop cursor.
- * @head:	the head for your list.
- * @member:	the name of the hlist_node within the struct.
- * @next:	the &struct hlist_node to use as a next cursor
- *
- * Special version of hlist_for_each_entry_rcu that make sure
- * each next pointer is fetched before each iteration.
- */
-#define hlist_for_each_entry_rcu_safenext(tpos, pos, head, member, next) \
-	for (pos = rcu_dereference((head)->first);			\
-		pos && ({ next = pos->next; smp_rmb(); prefetch(next); 1; }) && \
-		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
-		pos = rcu_dereference(next))
-
 #endif	/* __KERNEL__ */
 #endif
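
Note: the hlist_for_each_entry_rcu_safenext iterator removed above existed so that lockless readers could survive SLAB_DESTROY_BY_RCU sockets whose next pointer is rewritten mid-walk. Its replacement is the "nulls" list, where the end-of-list marker is not NULL but an odd pointer carrying a per-chain value (here, the hash bucket index). A minimal sketch of the encoding, roughly as linux/list_nulls.h provides it:

	/*
	 * Sketch of the nulls encoding (see linux/list_nulls.h for the
	 * real definitions).  The low bit tags the pointer as an end
	 * marker; the remaining bits carry an arbitrary per-chain value.
	 */
	#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
		((ptr)->first = (struct hlist_nulls_node *)(1UL | (((long)nulls) << 1)))

	static inline int is_a_nulls(const struct hlist_nulls_node *ptr)
	{
		return ((unsigned long)ptr & 1);	/* odd => end marker */
	}

	static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
	{
		return ((unsigned long)ptr) >> 1;	/* recover the stored value */
	}
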
diff --git a/include/net/sock.h b/include/net/sock.h
index 8b2b82131b6..0a638948868 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -42,6 +42,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/list_nulls.h>
 #include <linux/timer.h>
 #include <linux/cache.h>
 #include <linux/module.h>
@@ -52,6 +53,7 @@
 #include <linux/security.h>
 
 #include <linux/filter.h>
+#include <linux/rculist_nulls.h>
 
 #include <asm/atomic.h>
 #include <net/dst.h>
@@ -106,6 +108,7 @@ struct net;
  *	@skc_reuse: %SO_REUSEADDR setting
  *	@skc_bound_dev_if: bound device index if != 0
  *	@skc_node: main hash linkage for various protocol lookup tables
+ *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
  *	@skc_bind_node: bind hash linkage for various protocol lookup tables
  *	@skc_refcnt: reference count
  *	@skc_hash: hash value used with various protocol lookup tables
@@ -120,7 +123,10 @@ struct sock_common {
 	volatile unsigned char	skc_state;
 	unsigned char		skc_reuse;
 	int			skc_bound_dev_if;
-	struct hlist_node	skc_node;
+	union {
+		struct hlist_node	skc_node;
+		struct hlist_nulls_node skc_nulls_node;
+	};
 	struct hlist_node	skc_bind_node;
 	atomic_t		skc_refcnt;
 	unsigned int		skc_hash;
@@ -206,6 +212,7 @@ struct sock {
 #define sk_reuse		__sk_common.skc_reuse
 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
 #define sk_node			__sk_common.skc_node
+#define sk_nulls_node		__sk_common.skc_nulls_node
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_refcnt		__sk_common.skc_refcnt
 #define sk_hash			__sk_common.skc_hash
@@ -300,12 +307,30 @@ static inline struct sock *sk_head(const struct hlist_head *head)
 	return hlist_empty(head) ? NULL : __sk_head(head);
 }
 
+static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
+{
+	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
+}
+
+static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
+{
+	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
+}
+
 static inline struct sock *sk_next(const struct sock *sk)
 {
 	return sk->sk_node.next ?
 		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
 }
 
+static inline struct sock *sk_nulls_next(const struct sock *sk)
+{
+	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
+		hlist_nulls_entry(sk->sk_nulls_node.next,
+				  struct sock, sk_nulls_node) :
+		NULL;
+}
+
 static inline int sk_unhashed(const struct sock *sk)
 {
 	return hlist_unhashed(&sk->sk_node);
@@ -321,6 +346,11 @@ static __inline__ void sk_node_init(struct hlist_node *node)
 	node->pprev = NULL;
 }
 
+static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
+{
+	node->pprev = NULL;
+}
+
 static __inline__ void __sk_del_node(struct sock *sk)
 {
 	__hlist_del(&sk->sk_node);
@@ -367,18 +397,18 @@ static __inline__ int sk_del_node_init(struct sock *sk)
 	return rc;
 }
 
-static __inline__ int __sk_del_node_init_rcu(struct sock *sk)
+static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
 {
 	if (sk_hashed(sk)) {
-		hlist_del_init_rcu(&sk->sk_node);
+		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
 		return 1;
 	}
 	return 0;
 }
 
-static __inline__ int sk_del_node_init_rcu(struct sock *sk)
+static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
 {
-	int rc = __sk_del_node_init_rcu(sk);
+	int rc = __sk_nulls_del_node_init_rcu(sk);
 
 	if (rc) {
 		/* paranoid for a while -acme */
@@ -399,15 +429,15 @@ static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
 	__sk_add_node(sk, list);
 }
 
-static __inline__ void __sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-	hlist_add_head_rcu(&sk->sk_node, list);
+	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
-static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	sock_hold(sk);
-	__sk_add_node_rcu(sk, list);
+	__sk_nulls_add_node_rcu(sk, list);
 }
 
 static __inline__ void __sk_del_bind_node(struct sock *sk)
@@ -423,11 +453,16 @@ static __inline__ void sk_add_bind_node(struct sock *sk,
 
 #define sk_for_each(__sk, node, list) \
 	hlist_for_each_entry(__sk, node, list, sk_node)
-#define sk_for_each_rcu_safenext(__sk, node, list, next) \
-	hlist_for_each_entry_rcu_safenext(__sk, node, list, sk_node, next)
+#define sk_nulls_for_each(__sk, node, list) \
+	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
+#define sk_nulls_for_each_rcu(__sk, node, list) \
+	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
 #define sk_for_each_from(__sk, node) \
 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
 		hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_nulls_for_each_from(__sk, node) \
+	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
+		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
 #define sk_for_each_continue(__sk, node) \
 	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
 		hlist_for_each_entry_continue(__sk, node, sk_node)
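
Note: sk_nulls_for_each_rcu() expands to hlist_nulls_for_each_entry_rcu(), whose loop condition tests is_a_nulls() instead of comparing against NULL, so a lockless walk terminates on the chain's end marker and leaves that marker in the node cursor for the caller to inspect. Roughly, per linux/rculist_nulls.h of the same era:

	#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)		\
		for (pos = rcu_dereference((head)->first);			\
		     (!is_a_nulls(pos)) &&					\
			({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
		     pos = rcu_dereference(pos->next))
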
diff --git a/include/net/udp.h b/include/net/udp.h
index df2bfe54537..90e6ce56be6 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -51,7 +51,7 @@ struct udp_skb_cb {
 #define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
 
 struct udp_hslot {
-	struct hlist_head	head;
+	struct hlist_nulls_head	head;
 	spinlock_t		lock;
 } __attribute__((aligned(2 * sizeof(long))));
 struct udp_table {
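
Note: only the bucket head's type changes; the locking model does not. Chain mutations still take the per-slot spinlock, so the nulls protocol is needed only by lockless readers. A writer-side sketch, matching the udp_lib_get_port()/udp_lib_unhash() paths in this patch:

	spin_lock_bh(&hslot->lock);
	sk_nulls_add_node_rcu(sk, &hslot->head);	/* insert under the slot lock */
	spin_unlock_bh(&hslot->lock);
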
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 54badc9a019..fea2d873dd4 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -127,9 +127,9 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
 				 const struct sock *sk2))
 {
 	struct sock *sk2;
-	struct hlist_node *node;
+	struct hlist_nulls_node *node;
 
-	sk_for_each(sk2, node, &hslot->head)
+	sk_nulls_for_each(sk2, node, &hslot->head)
 		if (net_eq(sock_net(sk2), net) &&
 		    sk2 != sk &&
 		    sk2->sk_hash == num &&
@@ -189,12 +189,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 	inet_sk(sk)->num = snum;
 	sk->sk_hash = snum;
 	if (sk_unhashed(sk)) {
-		/*
-		 * We need that previous write to sk->sk_hash committed
-		 * before write to sk->next done in following add_node() variant
-		 */
-		smp_wmb();
-		sk_add_node_rcu(sk, &hslot->head);
+		sk_nulls_add_node_rcu(sk, &hslot->head);
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	}
 	error = 0;
@@ -261,7 +256,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 		int dif, struct udp_table *udptable)
 {
 	struct sock *sk, *result;
-	struct hlist_node *node, *next;
+	struct hlist_nulls_node *node;
 	unsigned short hnum = ntohs(dport);
 	unsigned int hash = udp_hashfn(net, hnum);
 	struct udp_hslot *hslot = &udptable->hash[hash];
@@ -271,13 +266,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 begin:
 	result = NULL;
 	badness = -1;
-	sk_for_each_rcu_safenext(sk, node, &hslot->head, next) {
-		/*
-		 * lockless reader, and SLAB_DESTROY_BY_RCU items:
-		 * We must check this item was not moved to another chain
-		 */
-		if (udp_hashfn(net, sk->sk_hash) != hash)
-			goto begin;
+	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
 		score = compute_score(sk, net, saddr, hnum, sport,
 				      daddr, dport, dif);
 		if (score > badness) {
@@ -285,6 +274,14 @@ begin:
 			badness = score;
 		}
 	}
+	/*
+	 * if the nulls value we got at the end of this lookup is
+	 * not the expected one, we must restart lookup.
+	 * We probably met an item that was moved to another chain.
+	 */
+	if (get_nulls_value(node) != hash)
+		goto begin;
+
 	if (result) {
 		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
 			result = NULL;
@@ -325,11 +322,11 @@ static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
 					     __be16 rmt_port, __be32 rmt_addr,
 					     int dif)
 {
-	struct hlist_node *node;
+	struct hlist_nulls_node *node;
 	struct sock *s = sk;
 	unsigned short hnum = ntohs(loc_port);
 
-	sk_for_each_from(s, node) {
+	sk_nulls_for_each_from(s, node) {
 		struct inet_sock *inet = inet_sk(s);
 
 		if (!net_eq(sock_net(s), net) ||
@@ -977,7 +974,7 @@ void udp_lib_unhash(struct sock *sk)
 	struct udp_hslot *hslot = &udptable->hash[hash];
 
 	spin_lock_bh(&hslot->lock);
-	if (sk_del_node_init_rcu(sk)) {
+	if (sk_nulls_del_node_init_rcu(sk)) {
 		inet_sk(sk)->num = 0;
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	}
@@ -1130,7 +1127,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	int dif;
 
 	spin_lock(&hslot->lock);
-	sk = sk_head(&hslot->head);
+	sk = sk_nulls_head(&hslot->head);
 	dif = skb->dev->ifindex;
 	sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
 	if (sk) {
@@ -1139,7 +1136,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		do {
 			struct sk_buff *skb1 = skb;
 
-			sknext = udp_v4_mcast_next(net, sk_next(sk), uh->dest,
+			sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
 						   daddr, uh->source, saddr,
 						   dif);
 			if (sknext)
@@ -1560,10 +1557,10 @@ static struct sock *udp_get_first(struct seq_file *seq, int start)
 	struct net *net = seq_file_net(seq);
 
 	for (state->bucket = start; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
-		struct hlist_node *node;
+		struct hlist_nulls_node *node;
 		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
 		spin_lock_bh(&hslot->lock);
-		sk_for_each(sk, node, &hslot->head) {
+		sk_nulls_for_each(sk, node, &hslot->head) {
 			if (!net_eq(sock_net(sk), net))
 				continue;
 			if (sk->sk_family == state->family)
@@ -1582,7 +1579,7 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
 	struct net *net = seq_file_net(seq);
 
 	do {
-		sk = sk_next(sk);
+		sk = sk_nulls_next(sk);
 	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
 
 	if (!sk) {
@@ -1753,7 +1750,7 @@ void __init udp_table_init(struct udp_table *table)
 	int i;
 
 	for (i = 0; i < UDP_HTABLE_SIZE; i++) {
-		INIT_HLIST_HEAD(&table->hash[i].head);
+		INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
 		spin_lock_init(&table->hash[i].lock);
 	}
 }
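
Note: the explicit smp_wmb() dropped from udp_lib_get_port() is not lost. udp_table_init() now seeds each bucket's end marker with the bucket index i, the very value the lookups compare with get_nulls_value(), and hlist_nulls_add_head_rcu() publishes the node with rcu_assign_pointer(), whose barrier orders the earlier store to sk->sk_hash before the socket becomes reachable on the chain. Roughly, per linux/rculist_nulls.h:

	static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
						    struct hlist_nulls_head *h)
	{
		struct hlist_nulls_node *first = h->first;

		n->next = first;
		n->pprev = &h->first;
		rcu_assign_pointer(h->first, n);	/* barrier: prior writes visible first */
		if (!is_a_nulls(first))
			first->pprev = &n->next;
	}
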
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8dafa36b1ba..fd2d9ad4a8a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -98,7 +98,7 @@ static struct sock *__udp6_lib_lookup(struct net *net,
 		int dif, struct udp_table *udptable)
 {
 	struct sock *sk, *result;
-	struct hlist_node *node, *next;
+	struct hlist_nulls_node *node;
 	unsigned short hnum = ntohs(dport);
 	unsigned int hash = udp_hashfn(net, hnum);
 	struct udp_hslot *hslot = &udptable->hash[hash];
@@ -108,19 +108,21 @@ static struct sock *__udp6_lib_lookup(struct net *net,
 begin:
 	result = NULL;
 	badness = -1;
-	sk_for_each_rcu_safenext(sk, node, &hslot->head, next) {
-		/*
-		 * lockless reader, and SLAB_DESTROY_BY_RCU items:
-		 * We must check this item was not moved to another chain
-		 */
-		if (udp_hashfn(net, sk->sk_hash) != hash)
-			goto begin;
+	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
 		score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
 		if (score > badness) {
 			result = sk;
 			badness = score;
 		}
 	}
+	/*
+	 * if the nulls value we got at the end of this lookup is
+	 * not the expected one, we must restart lookup.
+	 * We probably met an item that was moved to another chain.
+	 */
+	if (get_nulls_value(node) != hash)
+		goto begin;
+
 	if (result) {
 		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
 			result = NULL;
@@ -374,11 +376,11 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 				      __be16 rmt_port, struct in6_addr *rmt_addr,
 				      int dif)
 {
-	struct hlist_node *node;
+	struct hlist_nulls_node *node;
 	struct sock *s = sk;
 	unsigned short num = ntohs(loc_port);
 
-	sk_for_each_from(s, node) {
+	sk_nulls_for_each_from(s, node) {
 		struct inet_sock *inet = inet_sk(s);
 
 		if (!net_eq(sock_net(s), net))
@@ -423,7 +425,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	int dif;
 
 	spin_lock(&hslot->lock);
-	sk = sk_nulls_head(&hslot->head);
+	sk = sk_nulls_head(&hslot->head);
 	dif = inet6_iif(skb);
 	sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
 	if (!sk) {
@@ -432,7 +434,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	}
 
 	sk2 = sk;
-	while ((sk2 = udp_v6_mcast_next(net, sk_next(sk2), uh->dest, daddr,
+	while ((sk2 = udp_v6_mcast_next(net, sk_nulls_next(sk2), uh->dest, daddr,
 					uh->source, saddr, dif))) {
 		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
 		if (buff) {
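
Note: the net effect on the lookup fast path is that validation moves out of the loop. The old iterator checked every visited socket's hash and paid a prefetched next pointer plus an smp_rmb() per step; the nulls scheme defers everything to a single end-of-chain check. A reader whose cursor is dragged onto a foreign chain by an SLAB_DESTROY_BY_RCU reuse simply walks to that chain's marker, sees a nulls value different from its own bucket's, and restarts. Condensed from the two lookup hunks above:

	/* before: per-item check */
	sk_for_each_rcu_safenext(sk, node, &hslot->head, next) {
		if (udp_hashfn(net, sk->sk_hash) != hash)
			goto begin;		/* item moved to another chain */
		/* ... score the socket ... */
	}

	/* after: one check per walk */
	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
		/* ... score the socket ... */
	}
	if (get_nulls_value(node) != hash)
		goto begin;			/* ended on a foreign chain's marker */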