author	Eric Dumazet <dada1@cosmosbay.com>	2008-11-16 22:39:21 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-16 22:39:21 -0500
commit	88ab1932eac721c6e7336708558fa5ed02c85c80 (patch)
tree	c8788a1e3de08100bca341fa4180adfe5d02880f /net/ipv6/udp.c
parent	bbaffaca4810de1a25e32ecaf836eeaacc7a3d11 (diff)
udp: Use hlist_nulls in UDP RCU code
This is a straightforward patch, using the hlist_nulls infrastructure.
RCUification of UDP was already done two weeks ago.
Using hlist_nulls lets us avoid some memory barriers, both
at lookup time and at delete time.
The patch is large because it adds new macros to include/net/sock.h.
These macros will be used by TCP & DCCP in a following patch.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
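For readers not familiar with the hlist_nulls technique this changelog relies on, here is a minimal sketch (not part of the patch; struct item, item_lookup() and the table layout are hypothetical) of the lookup pattern that the new sk_nulls_for_each_rcu()/get_nulls_value() code in this diff follows: with RCU plus SLAB_DESTROY_BY_RCU an object can be freed and reused on another chain while we are walking it, so the walk ends on a "nulls" marker whose value is compared with the chain index we started from, and the lookup restarts on a mismatch.

#include <linux/list_nulls.h>
#include <linux/rculist_nulls.h>
#include <linux/rcupdate.h>

struct item {
	struct hlist_nulls_node node;
	unsigned int key;
};

/* Look up 'key' in chain 'hash' of a table whose chains were initialized
 * with their own index as the nulls value (see the sketch after the diff). */
static struct item *item_lookup(struct hlist_nulls_head *table,
				unsigned int hash, unsigned int key)
{
	struct hlist_nulls_node *pos;
	struct item *it;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(it, pos, &table[hash], node) {
		if (it->key == key)
			goto found;
	}
	/*
	 * The walk ended on a nulls marker.  If its value is not the hash
	 * we started from, an item we followed was freed and reused on
	 * another chain (SLAB_DESTROY_BY_RCU), so restart the lookup.
	 */
	if (get_nulls_value(pos) != hash)
		goto begin;
	it = NULL;
found:
	rcu_read_unlock();
	/* real code, like the UDP lookup below, must also take a reference
	 * (e.g. atomic_inc_not_zero()) and re-validate the key before
	 * trusting the result */
	return it;
}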
Diffstat (limited to 'net/ipv6/udp.c')
-rw-r--r--	net/ipv6/udp.c	26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8dafa36b1ba5..fd2d9ad4a8a3 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -98,7 +98,7 @@ static struct sock *__udp6_lib_lookup(struct net *net,
 		int dif, struct udp_table *udptable)
 {
 	struct sock *sk, *result;
-	struct hlist_node *node, *next;
+	struct hlist_nulls_node *node;
 	unsigned short hnum = ntohs(dport);
 	unsigned int hash = udp_hashfn(net, hnum);
 	struct udp_hslot *hslot = &udptable->hash[hash];
@@ -108,19 +108,21 @@ static struct sock *__udp6_lib_lookup(struct net *net,
 begin:
 	result = NULL;
 	badness = -1;
-	sk_for_each_rcu_safenext(sk, node, &hslot->head, next) {
-		/*
-		 * lockless reader, and SLAB_DESTROY_BY_RCU items:
-		 * We must check this item was not moved to another chain
-		 */
-		if (udp_hashfn(net, sk->sk_hash) != hash)
-			goto begin;
+	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
 		score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
 		if (score > badness) {
 			result = sk;
 			badness = score;
 		}
 	}
+	/*
+	 * if the nulls value we got at the end of this lookup is
+	 * not the expected one, we must restart lookup.
+	 * We probably met an item that was moved to another chain.
+	 */
+	if (get_nulls_value(node) != hash)
+		goto begin;
+
 	if (result) {
 		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
 			result = NULL;
@@ -374,11 +376,11 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 				      __be16 rmt_port, struct in6_addr *rmt_addr,
 				      int dif)
 {
-	struct hlist_node *node;
+	struct hlist_nulls_node *node;
 	struct sock *s = sk;
 	unsigned short num = ntohs(loc_port);
 
-	sk_for_each_from(s, node) {
+	sk_nulls_for_each_from(s, node) {
 		struct inet_sock *inet = inet_sk(s);
 
 		if (!net_eq(sock_net(s), net))
@@ -423,7 +425,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	int dif;
 
 	spin_lock(&hslot->lock);
-	sk = sk_head(&hslot->head);
+	sk = sk_nulls_head(&hslot->head);
 	dif = inet6_iif(skb);
 	sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
 	if (!sk) {
@@ -432,7 +434,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	}
 
 	sk2 = sk;
-	while ((sk2 = udp_v6_mcast_next(net, sk_next(sk2), uh->dest, daddr,
+	while ((sk2 = udp_v6_mcast_next(net, sk_nulls_next(sk2), uh->dest, daddr,
 			uh->source, saddr, dif))) {
 		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
 		if (buff) {
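A design note, sketched under assumptions rather than taken from this file's diff: the get_nulls_value(node) != hash test above only works if every chain's end marker was set up to encode the chain index, and if writers still serialize among themselves (the udp hslot spinlock above plays that role). The names below (my_table, MY_SLOTS, my_table_init, ...) are made up for illustration; INIT_HLIST_NULLS_HEAD(), hlist_nulls_add_head_rcu() and hlist_nulls_del_init_rcu() come from the hlist_nulls infrastructure this patch builds on.

#include <linux/rculist_nulls.h>
#include <linux/spinlock.h>

#define MY_SLOTS 128			/* hypothetical number of chains */

static struct hlist_nulls_head my_table[MY_SLOTS];
static spinlock_t my_locks[MY_SLOTS];	/* writer-side serialization */

static void my_table_init(void)
{
	unsigned int i;

	for (i = 0; i < MY_SLOTS; i++) {
		/* nulls value == chain index: a lookup that drifted onto
		 * another chain ends on a marker that does not match the
		 * hash it started from, so it knows to restart */
		INIT_HLIST_NULLS_HEAD(&my_table[i], i);
		spin_lock_init(&my_locks[i]);
	}
}

static void my_table_insert(struct hlist_nulls_node *n, unsigned int hash)
{
	spin_lock(&my_locks[hash]);
	hlist_nulls_add_head_rcu(n, &my_table[hash]);
	spin_unlock(&my_locks[hash]);
}

static void my_table_remove(struct hlist_nulls_node *n, unsigned int hash)
{
	spin_lock(&my_locks[hash]);
	/* readers racing with this delete either still see the node or,
	 * if it gets reused on another chain, detect the move through the
	 * nulls check and restart their lookup */
	hlist_nulls_del_init_rcu(n);
	spin_unlock(&my_locks[hash]);
}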