author	Eric Dumazet <eric.dumazet@gmail.com>	2010-10-06 20:49:21 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-06 21:01:33 -0400
commit	767e97e1e0db0d0f3152cd2f3bd3403596aedbad (patch)
tree	cda6089e75f98fc3c9812993cf790fccac98b67a
parent	546add79468183f266c75c632c96e4b0029e0d96 (diff)
neigh: RCU conversion of struct neighbour
This is the second step of the neighbour RCU conversion. (The first was commit d6bf7817: RCU conversion of the neigh hash table.)

neigh_lookup() becomes lockless, but still takes a reference on the found neighbour. (No more read_lock()/read_unlock() on tbl->lock.)

struct neighbour gets an additional rcu_head field and is freed after an RCU grace period.

Future work would be to eventually not take a reference on the neighbour for temporary dst entries (DST_NOCACHE), but this would need dst->_neighbour to use a noref bit, as we did for skb->_dst.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/neighbour.h	|   5
-rw-r--r--	net/core/neighbour.c	| 137
2 files changed, 88 insertions, 54 deletions
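The core of the conversion, as seen in neigh_lookup()/neigh_lookup_nodev() below, is the usual RCU hash-chain walk paired with atomic_inc_not_zero(). Here is a minimal sketch of that pattern in kernel-style C; the item/item_lookup names are illustrative only and are not part of this patch or of the kernel API:

    #include <linux/types.h>
    #include <linux/rcupdate.h>
    #include <linux/atomic.h>

    /* Illustrative entry type: hash-chained, refcounted, RCU-freed. */
    struct item {
            struct item __rcu *next; /* published with rcu_assign_pointer() */
            atomic_t refcnt;
            u32 key;
    };

    /* Lockless lookup: walk one RCU-protected chain, take a reference. */
    static struct item *item_lookup(struct item __rcu **buckets,
                                    u32 hash_val, u32 key)
    {
            struct item *it;

            rcu_read_lock_bh();
            for (it = rcu_dereference_bh(buckets[hash_val]);
                 it != NULL;
                 it = rcu_dereference_bh(it->next)) {
                    if (it->key == key) {
                            /* The entry may already be unlinked and queued
                             * for call_rcu() freeing; only hand it out if
                             * its refcount has not yet dropped to zero.
                             */
                            if (!atomic_inc_not_zero(&it->refcnt))
                                    it = NULL;
                            break;
                    }
            }
            rcu_read_unlock_bh();
            return it;
    }

The writer side of the patch mirrors this: entries are unlinked under tbl->lock with rcu_assign_pointer(), and neigh_destroy() defers the actual kmem_cache_free() through call_rcu()/neigh_destroy_rcu(), so a concurrent lockless reader can never dereference freed memory.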
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 37845dae6488..a4538d553704 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -91,7 +91,7 @@ struct neigh_statistics {
 #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
 
 struct neighbour {
-        struct neighbour *next;
+        struct neighbour __rcu *next;
         struct neigh_table *tbl;
         struct neigh_parms *parms;
         struct net_device *dev;
@@ -111,6 +111,7 @@ struct neighbour {
         struct sk_buff_head arp_queue;
         struct timer_list timer;
         const struct neigh_ops *ops;
+        struct rcu_head rcu;
         u8 primary_key[0];
 };
 
@@ -139,7 +140,7 @@ struct pneigh_entry {
  */
 
 struct neigh_hash_table {
-        struct neighbour **hash_buckets;
+        struct neighbour __rcu **hash_buckets;
         unsigned int hash_mask;
         __u32 hash_rnd;
         struct rcu_head rcu;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index dd8920e4f508..3ffafaa0414c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -139,10 +139,12 @@ static int neigh_forced_gc(struct neigh_table *tbl)
         nht = rcu_dereference_protected(tbl->nht,
                                         lockdep_is_held(&tbl->lock));
         for (i = 0; i <= nht->hash_mask; i++) {
-                struct neighbour *n, **np;
+                struct neighbour *n;
+                struct neighbour __rcu **np;
 
                 np = &nht->hash_buckets[i];
-                while ((n = *np) != NULL) {
+                while ((n = rcu_dereference_protected(*np,
+                                        lockdep_is_held(&tbl->lock))) != NULL) {
                         /* Neighbour record may be discarded if:
                          * - nobody refers to it.
                          * - it is not permanent
@@ -150,7 +152,9 @@ static int neigh_forced_gc(struct neigh_table *tbl)
                         write_lock(&n->lock);
                         if (atomic_read(&n->refcnt) == 1 &&
                             !(n->nud_state & NUD_PERMANENT)) {
-                                *np = n->next;
+                                rcu_assign_pointer(*np,
+                                        rcu_dereference_protected(n->next,
+                                                lockdep_is_held(&tbl->lock)));
                                 n->dead = 1;
                                 shrunk = 1;
                                 write_unlock(&n->lock);
@@ -208,14 +212,18 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
                                         lockdep_is_held(&tbl->lock));
 
         for (i = 0; i <= nht->hash_mask; i++) {
-                struct neighbour *n, **np = &nht->hash_buckets[i];
+                struct neighbour *n;
+                struct neighbour __rcu **np = &nht->hash_buckets[i];
 
-                while ((n = *np) != NULL) {
+                while ((n = rcu_dereference_protected(*np,
+                                        lockdep_is_held(&tbl->lock))) != NULL) {
                         if (dev && n->dev != dev) {
                                 np = &n->next;
                                 continue;
                         }
-                        *np = n->next;
+                        rcu_assign_pointer(*np,
+                                rcu_dereference_protected(n->next,
+                                        lockdep_is_held(&tbl->lock)));
                         write_lock(&n->lock);
                         neigh_del_timer(n);
                         n->dead = 1;
@@ -323,7 +331,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
                 kfree(ret);
                 return NULL;
         }
-        ret->hash_buckets = buckets;
+        rcu_assign_pointer(ret->hash_buckets, buckets);
         ret->hash_mask = entries - 1;
         get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
         return ret;
@@ -362,17 +370,22 @@ static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
         for (i = 0; i <= old_nht->hash_mask; i++) {
                 struct neighbour *n, *next;
 
-                for (n = old_nht->hash_buckets[i];
+                for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
+                                        lockdep_is_held(&tbl->lock));
                      n != NULL;
                      n = next) {
                         hash = tbl->hash(n->primary_key, n->dev,
                                          new_nht->hash_rnd);
 
                         hash &= new_nht->hash_mask;
-                        next = n->next;
+                        next = rcu_dereference_protected(n->next,
+                                        lockdep_is_held(&tbl->lock));
 
-                        n->next = new_nht->hash_buckets[hash];
-                        new_nht->hash_buckets[hash] = n;
+                        rcu_assign_pointer(n->next,
+                                           rcu_dereference_protected(
+                                                new_nht->hash_buckets[hash],
+                                                lockdep_is_held(&tbl->lock)));
+                        rcu_assign_pointer(new_nht->hash_buckets[hash], n);
                 }
         }
 
@@ -394,15 +407,18 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
         rcu_read_lock_bh();
         nht = rcu_dereference_bh(tbl->nht);
         hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
-        read_lock(&tbl->lock);
-        for (n = nht->hash_buckets[hash_val]; n; n = n->next) {
+
+        for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+             n != NULL;
+             n = rcu_dereference_bh(n->next)) {
                 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
-                        neigh_hold(n);
+                        if (!atomic_inc_not_zero(&n->refcnt))
+                                n = NULL;
                         NEIGH_CACHE_STAT_INC(tbl, hits);
                         break;
                 }
         }
-        read_unlock(&tbl->lock);
+
         rcu_read_unlock_bh();
         return n;
 }
@@ -421,16 +437,19 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
         rcu_read_lock_bh();
         nht = rcu_dereference_bh(tbl->nht);
         hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) & nht->hash_mask;
-        read_lock(&tbl->lock);
-        for (n = nht->hash_buckets[hash_val]; n; n = n->next) {
+
+        for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+             n != NULL;
+             n = rcu_dereference_bh(n->next)) {
                 if (!memcmp(n->primary_key, pkey, key_len) &&
                     net_eq(dev_net(n->dev), net)) {
-                        neigh_hold(n);
+                        if (!atomic_inc_not_zero(&n->refcnt))
+                                n = NULL;
                         NEIGH_CACHE_STAT_INC(tbl, hits);
                         break;
                 }
         }
-        read_unlock(&tbl->lock);
+
         rcu_read_unlock_bh();
         return n;
 }
@@ -483,7 +502,11 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                 goto out_tbl_unlock;
         }
 
-        for (n1 = nht->hash_buckets[hash_val]; n1; n1 = n1->next) {
+        for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
+                                            lockdep_is_held(&tbl->lock));
+             n1 != NULL;
+             n1 = rcu_dereference_protected(n1->next,
+                        lockdep_is_held(&tbl->lock))) {
                 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
                         neigh_hold(n1);
                         rc = n1;
@@ -491,10 +514,12 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                 }
         }
 
-        n->next = nht->hash_buckets[hash_val];
-        nht->hash_buckets[hash_val] = n;
         n->dead = 0;
         neigh_hold(n);
+        rcu_assign_pointer(n->next,
+                           rcu_dereference_protected(nht->hash_buckets[hash_val],
+                                                     lockdep_is_held(&tbl->lock)));
+        rcu_assign_pointer(nht->hash_buckets[hash_val], n);
         write_unlock_bh(&tbl->lock);
         NEIGH_PRINTK2("neigh %p is created.\n", n);
         rc = n;
@@ -651,6 +676,12 @@ static inline void neigh_parms_put(struct neigh_parms *parms)
                 neigh_parms_destroy(parms);
 }
 
+static void neigh_destroy_rcu(struct rcu_head *head)
+{
+        struct neighbour *neigh = container_of(head, struct neighbour, rcu);
+
+        kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
+}
 /*
  *      neighbour must already be out of the table;
  *
@@ -690,7 +721,7 @@ void neigh_destroy(struct neighbour *neigh)
         NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
 
         atomic_dec(&neigh->tbl->entries);
-        kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
+        call_rcu(&neigh->rcu, neigh_destroy_rcu);
 }
 EXPORT_SYMBOL(neigh_destroy);
 
@@ -731,7 +762,8 @@ static void neigh_connect(struct neighbour *neigh)
 static void neigh_periodic_work(struct work_struct *work)
 {
         struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
-        struct neighbour *n, **np;
+        struct neighbour *n;
+        struct neighbour __rcu **np;
         unsigned int i;
         struct neigh_hash_table *nht;
 
@@ -756,7 +788,8 @@ static void neigh_periodic_work(struct work_struct *work)
         for (i = 0 ; i <= nht->hash_mask; i++) {
                 np = &nht->hash_buckets[i];
 
-                while ((n = *np) != NULL) {
+                while ((n = rcu_dereference_protected(*np,
+                                        lockdep_is_held(&tbl->lock))) != NULL) {
                         unsigned int state;
 
                         write_lock(&n->lock);
@@ -1213,8 +1246,8 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
 }
 
 /* This function can be used in contexts, where only old dev_queue_xmit
-   worked, f.e. if you want to override normal output path (eql, shaper),
-   but resolution is not made yet.
+ * worked, f.e. if you want to override normal output path (eql, shaper),
+ * but resolution is not made yet.
  */
 
 int neigh_compat_output(struct sk_buff *skb)
@@ -2123,7 +2156,7 @@ static void neigh_update_notify(struct neighbour *neigh)
 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                             struct netlink_callback *cb)
 {
-        struct net * net = sock_net(skb->sk);
+        struct net *net = sock_net(skb->sk);
         struct neighbour *n;
         int rc, h, s_h = cb->args[1];
         int idx, s_idx = idx = cb->args[2];
@@ -2132,13 +2165,14 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
         rcu_read_lock_bh();
         nht = rcu_dereference_bh(tbl->nht);
 
-        read_lock(&tbl->lock);
         for (h = 0; h <= nht->hash_mask; h++) {
                 if (h < s_h)
                         continue;
                 if (h > s_h)
                         s_idx = 0;
-                for (n = nht->hash_buckets[h], idx = 0; n; n = n->next) {
+                for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
+                     n != NULL;
+                     n = rcu_dereference_bh(n->next)) {
                         if (!net_eq(dev_net(n->dev), net))
                                 continue;
                         if (idx < s_idx)
@@ -2150,13 +2184,12 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                                 rc = -1;
                                 goto out;
                         }
-                next:
+next:
                         idx++;
                 }
         }
         rc = skb->len;
 out:
-        read_unlock(&tbl->lock);
         rcu_read_unlock_bh();
         cb->args[1] = h;
         cb->args[2] = idx;
@@ -2195,11 +2228,13 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void
         rcu_read_lock_bh();
         nht = rcu_dereference_bh(tbl->nht);
 
-        read_lock(&tbl->lock);
+        read_lock(&tbl->lock); /* avoid resizes */
         for (chain = 0; chain <= nht->hash_mask; chain++) {
                 struct neighbour *n;
 
-                for (n = nht->hash_buckets[chain]; n; n = n->next)
+                for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
+                     n != NULL;
+                     n = rcu_dereference_bh(n->next))
                         cb(n, cookie);
         }
         read_unlock(&tbl->lock);
@@ -2217,16 +2252,20 @@ void __neigh_for_each_release(struct neigh_table *tbl,
         nht = rcu_dereference_protected(tbl->nht,
                                         lockdep_is_held(&tbl->lock));
         for (chain = 0; chain <= nht->hash_mask; chain++) {
-                struct neighbour *n, **np;
+                struct neighbour *n;
+                struct neighbour __rcu **np;
 
                 np = &nht->hash_buckets[chain];
-                while ((n = *np) != NULL) {
+                while ((n = rcu_dereference_protected(*np,
+                                        lockdep_is_held(&tbl->lock))) != NULL) {
                         int release;
 
                         write_lock(&n->lock);
                         release = cb(n);
                         if (release) {
-                                *np = n->next;
+                                rcu_assign_pointer(*np,
+                                        rcu_dereference_protected(n->next,
+                                                lockdep_is_held(&tbl->lock)));
                                 n->dead = 1;
                         } else
                                 np = &n->next;
@@ -2250,7 +2289,7 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
 
         state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
         for (bucket = 0; bucket <= nht->hash_mask; bucket++) {
-                n = nht->hash_buckets[bucket];
+                n = rcu_dereference_bh(nht->hash_buckets[bucket]);
 
                 while (n) {
                         if (!net_eq(dev_net(n->dev), net))
@@ -2267,8 +2306,8 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
                                 break;
                         if (n->nud_state & ~NUD_NOARP)
                                 break;
-                next:
-                        n = n->next;
+next:
+                        n = rcu_dereference_bh(n->next);
                 }
 
                 if (n)
@@ -2292,7 +2331,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
                 if (v)
                         return n;
         }
-        n = n->next;
+        n = rcu_dereference_bh(n->next);
 
         while (1) {
                 while (n) {
@@ -2309,8 +2348,8 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
 
                         if (n->nud_state & ~NUD_NOARP)
                                 break;
-                next:
-                        n = n->next;
+next:
+                        n = rcu_dereference_bh(n->next);
                 }
 
                 if (n)
@@ -2319,7 +2358,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
                 if (++state->bucket > nht->hash_mask)
                         break;
 
-                n = nht->hash_buckets[state->bucket];
+                n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
         }
 
         if (n && pos)
@@ -2417,7 +2456,6 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 }
 
 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
-        __acquires(tbl->lock)
         __acquires(rcu_bh)
 {
         struct neigh_seq_state *state = seq->private;
@@ -2428,7 +2466,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
         rcu_read_lock_bh();
         state->nht = rcu_dereference_bh(tbl->nht);
-        read_lock(&tbl->lock);
+
         return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
 EXPORT_SYMBOL(neigh_seq_start);
@@ -2461,13 +2499,8 @@ out:
 EXPORT_SYMBOL(neigh_seq_next);
 
 void neigh_seq_stop(struct seq_file *seq, void *v)
-        __releases(tbl->lock)
         __releases(rcu_bh)
 {
-        struct neigh_seq_state *state = seq->private;
-        struct neigh_table *tbl = state->tbl;
-
-        read_unlock(&tbl->lock);
         rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(neigh_seq_stop);