author     David S. Miller <davem@davemloft.net>    2011-03-08 17:59:28 -0500
committer  David S. Miller <davem@davemloft.net>    2011-03-08 17:59:28 -0500
commit     7b46ac4e77f3224a1befe032c77f1df31d1b42c4
tree       e0bd89b476e0f07e23f949366c283f7d7d6f9d4e  /net/ipv4/inetpeer.c
parent     5217e8794619ac0a29151f29be20c7d6188303ba
inetpeer: Don't disable BH for initial fast RCU lookup.
If the lockless lookup already tolerates concurrent modifications
to the tree made on other CPUs, then it equally tolerates
modifications made on the local CPU (e.g. by a softirq interrupting
the lookup), so there is no need to disable BH around it.
Signed-off-by: David S. Miller <davem@davemloft.net>
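
The reasoning generalizes: a reader that only follows rcu_dereference()'d
pointers inside a plain rcu_read_lock() section is protected against
writers on any CPU and in any context, including local softirqs, so the
_bh variants buy nothing here. Below is a minimal, hypothetical sketch of
that lookup pattern; demo_node and demo_lookup() are illustration-only
names, not part of this patch or of inetpeer.c.

#include <linux/rcupdate.h>

/* Illustration-only node type; the real code walks struct inet_peer
 * via its avl_left/avl_right pointers. */
struct demo_node {
	int key;
	struct demo_node __rcu *left;
	struct demo_node __rcu *right;
};

/* Lockless search under plain rcu_read_lock().  Every pointer loaded
 * with rcu_dereference() stays valid (is not freed) until
 * rcu_read_unlock(), even while a writer rebalances the tree.  The
 * walk may still miss an existing key, so callers must be prepared
 * to retry under the write-side lock, as inet_getpeer() does. */
static bool demo_lookup(struct demo_node __rcu **rootp, int key)
{
	struct demo_node *n;
	bool found = false;

	rcu_read_lock();
	n = rcu_dereference(*rootp);
	while (n) {
		if (n->key == key) {
			found = true;
			break;
		}
		if (key < n->key)
			n = rcu_dereference(n->left);
		else
			n = rcu_dereference(n->right);
	}
	rcu_read_unlock();
	return found;
}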
Diffstat (limited to 'net/ipv4/inetpeer.c')
-rw-r--r--  net/ipv4/inetpeer.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f604ffdbea27..6442c35edb0b 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -206,16 +206,16 @@ static int addr_compare(const struct inetpeer_addr *a,
 })
 
 /*
- * Called with rcu_read_lock_bh()
+ * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
  * in an endless loop.
  * But every pointer we follow is guaranteed to be valid thanks to RCU.
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
-static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
-				       struct inet_peer_base *base)
+static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
+				    struct inet_peer_base *base)
 {
-	struct inet_peer *u = rcu_dereference_bh(base->root);
+	struct inet_peer *u = rcu_dereference(base->root);
 	int count = 0;
 
 	while (u != peer_avl_empty) {
@@ -231,9 +231,9 @@ static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
 			return u;
 		}
 		if (cmp == -1)
-			u = rcu_dereference_bh(u->avl_left);
+			u = rcu_dereference(u->avl_left);
 		else
-			u = rcu_dereference_bh(u->avl_right);
+			u = rcu_dereference(u->avl_right);
 		if (unlikely(++count == PEER_MAXDEPTH))
 			break;
 	}
@@ -470,11 +470,11 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	sequence = read_seqbegin(&base->lock);
-	p = lookup_rcu_bh(daddr, base);
+	p = lookup_rcu(daddr, base);
 	invalidated = read_seqretry(&base->lock, sequence);
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	if (p) {
 		/* The existing node has been found.
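
For context, the fast path above pairs two mechanisms: RCU keeps the
lockless walk safe, while the seqlock read tells the caller whether a
writer touched the tree during the walk, in which case a miss cannot be
trusted. A hypothetical sketch of that retry pattern follows; demo_base,
demo_lookup_rcu() and demo_lookup_locked() are assumed names for
illustration, not the actual inet_getpeer() body.

#include <linux/rcupdate.h>
#include <linux/seqlock.h>

struct demo_node;

struct demo_base {
	seqlock_t lock;			/* write side of the seqlock */
	struct demo_node __rcu *root;
};

/* Hypothetical helpers assumed for this sketch. */
static struct demo_node *demo_lookup_rcu(struct demo_base *base, int key);
static struct demo_node *demo_lookup_locked(struct demo_base *base, int key);

static struct demo_node *demo_get(struct demo_base *base, int key)
{
	struct demo_node *p;
	unsigned int seq;
	bool invalidated;

	/* Fast path: lockless RCU lookup, bracketed by a seqlock read
	 * so we can detect a concurrent writer. */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = demo_lookup_rcu(base, key);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;		/* hit: no lock was ever taken */

	/* A miss is only conclusive if no writer interfered. */
	if (!invalidated)
		return NULL;

	/* Slow path: redo the exact lookup under the write lock. */
	write_seqlock_bh(&base->lock);
	p = demo_lookup_locked(base, key);
	write_sequnlock_bh(&base->lock);
	return p;
}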