aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/neighbour.c
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2010-10-04 02:15:44 -0400
committerDavid S. Miller <davem@davemloft.net>2010-10-05 17:54:36 -0400
commitd6bf781712a1d25cc8987036b3a48535b331eb91 (patch)
treecf83ab68ef2519576578d8336a43cc13cd375cde /net/core/neighbour.c
parent110b2499370c401cdcc7c63e481084467291d556 (diff)
net neigh: RCU conversion of neigh hash table
David, this is the first step for RCU conversion of neigh code. Next patches will convert hash_buckets[] and "struct neighbour" to RCU protected objects. Thanks. [PATCH net-next] net neigh: RCU conversion of neigh hash table. Instead of storing hash_buckets, hash_mask and hash_rnd in "struct neigh_table", a new structure is defined: struct neigh_hash_table { struct neighbour **hash_buckets; unsigned int hash_mask; __u32 hash_rnd; struct rcu_head rcu; }; And "struct neigh_table" has an RCU protected pointer to such a neigh_hash_table. This means the signature of the (*hash)() function changed: we need to add a third parameter with the actual hash_rnd value, since this is no longer a neigh_table field. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/neighbour.c')
-rw-r--r-- net/core/neighbour.c | 219
1 file changed, 137 insertions(+), 82 deletions(-)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d6996e072a41..dd8920e4f508 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -131,14 +131,17 @@ static int neigh_forced_gc(struct neigh_table *tbl)
131{ 131{
132 int shrunk = 0; 132 int shrunk = 0;
133 int i; 133 int i;
134 struct neigh_hash_table *nht;
134 135
135 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs); 136 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
136 137
137 write_lock_bh(&tbl->lock); 138 write_lock_bh(&tbl->lock);
138 for (i = 0; i <= tbl->hash_mask; i++) { 139 nht = rcu_dereference_protected(tbl->nht,
140 lockdep_is_held(&tbl->lock));
141 for (i = 0; i <= nht->hash_mask; i++) {
139 struct neighbour *n, **np; 142 struct neighbour *n, **np;
140 143
141 np = &tbl->hash_buckets[i]; 144 np = &nht->hash_buckets[i];
142 while ((n = *np) != NULL) { 145 while ((n = *np) != NULL) {
143 /* Neighbour record may be discarded if: 146 /* Neighbour record may be discarded if:
144 * - nobody refers to it. 147 * - nobody refers to it.
@@ -199,9 +202,13 @@ static void pneigh_queue_purge(struct sk_buff_head *list)
199static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) 202static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
200{ 203{
201 int i; 204 int i;
205 struct neigh_hash_table *nht;
202 206
203 for (i = 0; i <= tbl->hash_mask; i++) { 207 nht = rcu_dereference_protected(tbl->nht,
204 struct neighbour *n, **np = &tbl->hash_buckets[i]; 208 lockdep_is_held(&tbl->lock));
209
210 for (i = 0; i <= nht->hash_mask; i++) {
211 struct neighbour *n, **np = &nht->hash_buckets[i];
205 212
206 while ((n = *np) != NULL) { 213 while ((n = *np) != NULL) {
207 if (dev && n->dev != dev) { 214 if (dev && n->dev != dev) {
@@ -297,64 +304,81 @@ out_entries:
297 goto out; 304 goto out;
298} 305}
299 306
300static struct neighbour **neigh_hash_alloc(unsigned int entries) 307static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
301{ 308{
302 unsigned long size = entries * sizeof(struct neighbour *); 309 size_t size = entries * sizeof(struct neighbour *);
303 struct neighbour **ret; 310 struct neigh_hash_table *ret;
311 struct neighbour **buckets;
304 312
305 if (size <= PAGE_SIZE) { 313 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
306 ret = kzalloc(size, GFP_ATOMIC); 314 if (!ret)
307 } else { 315 return NULL;
308 ret = (struct neighbour **) 316 if (size <= PAGE_SIZE)
309 __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size)); 317 buckets = kzalloc(size, GFP_ATOMIC);
318 else
319 buckets = (struct neighbour **)
320 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
321 get_order(size));
322 if (!buckets) {
323 kfree(ret);
324 return NULL;
310 } 325 }
326 ret->hash_buckets = buckets;
327 ret->hash_mask = entries - 1;
328 get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
311 return ret; 329 return ret;
312} 330}
313 331
314static void neigh_hash_free(struct neighbour **hash, unsigned int entries) 332static void neigh_hash_free_rcu(struct rcu_head *head)
315{ 333{
316 unsigned long size = entries * sizeof(struct neighbour *); 334 struct neigh_hash_table *nht = container_of(head,
335 struct neigh_hash_table,
336 rcu);
337 size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
338 struct neighbour **buckets = nht->hash_buckets;
317 339
318 if (size <= PAGE_SIZE) 340 if (size <= PAGE_SIZE)
319 kfree(hash); 341 kfree(buckets);
320 else 342 else
321 free_pages((unsigned long)hash, get_order(size)); 343 free_pages((unsigned long)buckets, get_order(size));
344 kfree(nht);
322} 345}
323 346
324static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries) 347static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
348 unsigned long new_entries)
325{ 349{
326 struct neighbour **new_hash, **old_hash; 350 unsigned int i, hash;
327 unsigned int i, new_hash_mask, old_entries; 351 struct neigh_hash_table *new_nht, *old_nht;
328 352
329 NEIGH_CACHE_STAT_INC(tbl, hash_grows); 353 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
330 354
331 BUG_ON(!is_power_of_2(new_entries)); 355 BUG_ON(!is_power_of_2(new_entries));
332 new_hash = neigh_hash_alloc(new_entries); 356 old_nht = rcu_dereference_protected(tbl->nht,
333 if (!new_hash) 357 lockdep_is_held(&tbl->lock));
334 return; 358 new_nht = neigh_hash_alloc(new_entries);
359 if (!new_nht)
360 return old_nht;
335 361
336 old_entries = tbl->hash_mask + 1; 362 for (i = 0; i <= old_nht->hash_mask; i++) {
337 new_hash_mask = new_entries - 1;
338 old_hash = tbl->hash_buckets;
339
340 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
341 for (i = 0; i < old_entries; i++) {
342 struct neighbour *n, *next; 363 struct neighbour *n, *next;
343 364
344 for (n = old_hash[i]; n; n = next) { 365 for (n = old_nht->hash_buckets[i];
345 unsigned int hash_val = tbl->hash(n->primary_key, n->dev); 366 n != NULL;
367 n = next) {
368 hash = tbl->hash(n->primary_key, n->dev,
369 new_nht->hash_rnd);
346 370
347 hash_val &= new_hash_mask; 371 hash &= new_nht->hash_mask;
348 next = n->next; 372 next = n->next;
349 373
350 n->next = new_hash[hash_val]; 374 n->next = new_nht->hash_buckets[hash];
351 new_hash[hash_val] = n; 375 new_nht->hash_buckets[hash] = n;
352 } 376 }
353 } 377 }
354 tbl->hash_buckets = new_hash;
355 tbl->hash_mask = new_hash_mask;
356 378
357 neigh_hash_free(old_hash, old_entries); 379 rcu_assign_pointer(tbl->nht, new_nht);
380 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
381 return new_nht;
358} 382}
359 383
360struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, 384struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
@@ -363,19 +387,23 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
363 struct neighbour *n; 387 struct neighbour *n;
364 int key_len = tbl->key_len; 388 int key_len = tbl->key_len;
365 u32 hash_val; 389 u32 hash_val;
390 struct neigh_hash_table *nht;
366 391
367 NEIGH_CACHE_STAT_INC(tbl, lookups); 392 NEIGH_CACHE_STAT_INC(tbl, lookups);
368 393
369 read_lock_bh(&tbl->lock); 394 rcu_read_lock_bh();
370 hash_val = tbl->hash(pkey, dev); 395 nht = rcu_dereference_bh(tbl->nht);
371 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) { 396 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
397 read_lock(&tbl->lock);
398 for (n = nht->hash_buckets[hash_val]; n; n = n->next) {
372 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) { 399 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
373 neigh_hold(n); 400 neigh_hold(n);
374 NEIGH_CACHE_STAT_INC(tbl, hits); 401 NEIGH_CACHE_STAT_INC(tbl, hits);
375 break; 402 break;
376 } 403 }
377 } 404 }
378 read_unlock_bh(&tbl->lock); 405 read_unlock(&tbl->lock);
406 rcu_read_unlock_bh();
379 return n; 407 return n;
380} 408}
381EXPORT_SYMBOL(neigh_lookup); 409EXPORT_SYMBOL(neigh_lookup);
@@ -386,12 +414,15 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
386 struct neighbour *n; 414 struct neighbour *n;
387 int key_len = tbl->key_len; 415 int key_len = tbl->key_len;
388 u32 hash_val; 416 u32 hash_val;
417 struct neigh_hash_table *nht;
389 418
390 NEIGH_CACHE_STAT_INC(tbl, lookups); 419 NEIGH_CACHE_STAT_INC(tbl, lookups);
391 420
392 read_lock_bh(&tbl->lock); 421 rcu_read_lock_bh();
393 hash_val = tbl->hash(pkey, NULL); 422 nht = rcu_dereference_bh(tbl->nht);
394 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) { 423 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) & nht->hash_mask;
424 read_lock(&tbl->lock);
425 for (n = nht->hash_buckets[hash_val]; n; n = n->next) {
395 if (!memcmp(n->primary_key, pkey, key_len) && 426 if (!memcmp(n->primary_key, pkey, key_len) &&
396 net_eq(dev_net(n->dev), net)) { 427 net_eq(dev_net(n->dev), net)) {
397 neigh_hold(n); 428 neigh_hold(n);
@@ -399,7 +430,8 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
399 break; 430 break;
400 } 431 }
401 } 432 }
402 read_unlock_bh(&tbl->lock); 433 read_unlock(&tbl->lock);
434 rcu_read_unlock_bh();
403 return n; 435 return n;
404} 436}
405EXPORT_SYMBOL(neigh_lookup_nodev); 437EXPORT_SYMBOL(neigh_lookup_nodev);
@@ -411,6 +443,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
411 int key_len = tbl->key_len; 443 int key_len = tbl->key_len;
412 int error; 444 int error;
413 struct neighbour *n1, *rc, *n = neigh_alloc(tbl); 445 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
446 struct neigh_hash_table *nht;
414 447
415 if (!n) { 448 if (!n) {
416 rc = ERR_PTR(-ENOBUFS); 449 rc = ERR_PTR(-ENOBUFS);
@@ -437,18 +470,20 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
437 n->confirmed = jiffies - (n->parms->base_reachable_time << 1); 470 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
438 471
439 write_lock_bh(&tbl->lock); 472 write_lock_bh(&tbl->lock);
473 nht = rcu_dereference_protected(tbl->nht,
474 lockdep_is_held(&tbl->lock));
440 475
441 if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1)) 476 if (atomic_read(&tbl->entries) > (nht->hash_mask + 1))
442 neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1); 477 nht = neigh_hash_grow(tbl, (nht->hash_mask + 1) << 1);
443 478
444 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask; 479 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
445 480
446 if (n->parms->dead) { 481 if (n->parms->dead) {
447 rc = ERR_PTR(-EINVAL); 482 rc = ERR_PTR(-EINVAL);
448 goto out_tbl_unlock; 483 goto out_tbl_unlock;
449 } 484 }
450 485
451 for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) { 486 for (n1 = nht->hash_buckets[hash_val]; n1; n1 = n1->next) {
452 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { 487 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
453 neigh_hold(n1); 488 neigh_hold(n1);
454 rc = n1; 489 rc = n1;
@@ -456,8 +491,8 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
456 } 491 }
457 } 492 }
458 493
459 n->next = tbl->hash_buckets[hash_val]; 494 n->next = nht->hash_buckets[hash_val];
460 tbl->hash_buckets[hash_val] = n; 495 nht->hash_buckets[hash_val] = n;
461 n->dead = 0; 496 n->dead = 0;
462 neigh_hold(n); 497 neigh_hold(n);
463 write_unlock_bh(&tbl->lock); 498 write_unlock_bh(&tbl->lock);
@@ -698,10 +733,13 @@ static void neigh_periodic_work(struct work_struct *work)
698 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work); 733 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
699 struct neighbour *n, **np; 734 struct neighbour *n, **np;
700 unsigned int i; 735 unsigned int i;
736 struct neigh_hash_table *nht;
701 737
702 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); 738 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
703 739
704 write_lock_bh(&tbl->lock); 740 write_lock_bh(&tbl->lock);
741 nht = rcu_dereference_protected(tbl->nht,
742 lockdep_is_held(&tbl->lock));
705 743
706 /* 744 /*
707 * periodically recompute ReachableTime from random function 745 * periodically recompute ReachableTime from random function
@@ -715,8 +753,8 @@ static void neigh_periodic_work(struct work_struct *work)
715 neigh_rand_reach_time(p->base_reachable_time); 753 neigh_rand_reach_time(p->base_reachable_time);
716 } 754 }
717 755
718 for (i = 0 ; i <= tbl->hash_mask; i++) { 756 for (i = 0 ; i <= nht->hash_mask; i++) {
719 np = &tbl->hash_buckets[i]; 757 np = &nht->hash_buckets[i];
720 758
721 while ((n = *np) != NULL) { 759 while ((n = *np) != NULL) {
722 unsigned int state; 760 unsigned int state;
@@ -1438,17 +1476,14 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
1438 panic("cannot create neighbour proc dir entry"); 1476 panic("cannot create neighbour proc dir entry");
1439#endif 1477#endif
1440 1478
1441 tbl->hash_mask = 1; 1479 tbl->nht = neigh_hash_alloc(8);
1442 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1443 1480
1444 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *); 1481 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1445 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL); 1482 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1446 1483
1447 if (!tbl->hash_buckets || !tbl->phash_buckets) 1484 if (!tbl->nht || !tbl->phash_buckets)
1448 panic("cannot allocate neighbour cache hashes"); 1485 panic("cannot allocate neighbour cache hashes");
1449 1486
1450 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1451
1452 rwlock_init(&tbl->lock); 1487 rwlock_init(&tbl->lock);
1453 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work); 1488 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1454 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time); 1489 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
@@ -1504,8 +1539,8 @@ int neigh_table_clear(struct neigh_table *tbl)
1504 } 1539 }
1505 write_unlock(&neigh_tbl_lock); 1540 write_unlock(&neigh_tbl_lock);
1506 1541
1507 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1); 1542 call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
1508 tbl->hash_buckets = NULL; 1543 tbl->nht = NULL;
1509 1544
1510 kfree(tbl->phash_buckets); 1545 kfree(tbl->phash_buckets);
1511 tbl->phash_buckets = NULL; 1546 tbl->phash_buckets = NULL;
@@ -1745,18 +1780,22 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1745 unsigned long now = jiffies; 1780 unsigned long now = jiffies;
1746 unsigned int flush_delta = now - tbl->last_flush; 1781 unsigned int flush_delta = now - tbl->last_flush;
1747 unsigned int rand_delta = now - tbl->last_rand; 1782 unsigned int rand_delta = now - tbl->last_rand;
1748 1783 struct neigh_hash_table *nht;
1749 struct ndt_config ndc = { 1784 struct ndt_config ndc = {
1750 .ndtc_key_len = tbl->key_len, 1785 .ndtc_key_len = tbl->key_len,
1751 .ndtc_entry_size = tbl->entry_size, 1786 .ndtc_entry_size = tbl->entry_size,
1752 .ndtc_entries = atomic_read(&tbl->entries), 1787 .ndtc_entries = atomic_read(&tbl->entries),
1753 .ndtc_last_flush = jiffies_to_msecs(flush_delta), 1788 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1754 .ndtc_last_rand = jiffies_to_msecs(rand_delta), 1789 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1755 .ndtc_hash_rnd = tbl->hash_rnd,
1756 .ndtc_hash_mask = tbl->hash_mask,
1757 .ndtc_proxy_qlen = tbl->proxy_queue.qlen, 1790 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1758 }; 1791 };
1759 1792
1793 rcu_read_lock_bh();
1794 nht = rcu_dereference_bh(tbl->nht);
1795 ndc.ndtc_hash_rnd = nht->hash_rnd;
1796 ndc.ndtc_hash_mask = nht->hash_mask;
1797 rcu_read_unlock_bh();
1798
1760 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc); 1799 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1761 } 1800 }
1762 1801
@@ -2088,14 +2127,18 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2088 struct neighbour *n; 2127 struct neighbour *n;
2089 int rc, h, s_h = cb->args[1]; 2128 int rc, h, s_h = cb->args[1];
2090 int idx, s_idx = idx = cb->args[2]; 2129 int idx, s_idx = idx = cb->args[2];
2130 struct neigh_hash_table *nht;
2091 2131
2092 read_lock_bh(&tbl->lock); 2132 rcu_read_lock_bh();
2093 for (h = 0; h <= tbl->hash_mask; h++) { 2133 nht = rcu_dereference_bh(tbl->nht);
2134
2135 read_lock(&tbl->lock);
2136 for (h = 0; h <= nht->hash_mask; h++) {
2094 if (h < s_h) 2137 if (h < s_h)
2095 continue; 2138 continue;
2096 if (h > s_h) 2139 if (h > s_h)
2097 s_idx = 0; 2140 s_idx = 0;
2098 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) { 2141 for (n = nht->hash_buckets[h], idx = 0; n; n = n->next) {
2099 if (!net_eq(dev_net(n->dev), net)) 2142 if (!net_eq(dev_net(n->dev), net))
2100 continue; 2143 continue;
2101 if (idx < s_idx) 2144 if (idx < s_idx)
@@ -2104,7 +2147,6 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2104 cb->nlh->nlmsg_seq, 2147 cb->nlh->nlmsg_seq,
2105 RTM_NEWNEIGH, 2148 RTM_NEWNEIGH,
2106 NLM_F_MULTI) <= 0) { 2149 NLM_F_MULTI) <= 0) {
2107 read_unlock_bh(&tbl->lock);
2108 rc = -1; 2150 rc = -1;
2109 goto out; 2151 goto out;
2110 } 2152 }
@@ -2112,9 +2154,10 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2112 idx++; 2154 idx++;
2113 } 2155 }
2114 } 2156 }
2115 read_unlock_bh(&tbl->lock);
2116 rc = skb->len; 2157 rc = skb->len;
2117out: 2158out:
2159 read_unlock(&tbl->lock);
2160 rcu_read_unlock_bh();
2118 cb->args[1] = h; 2161 cb->args[1] = h;
2119 cb->args[2] = idx; 2162 cb->args[2] = idx;
2120 return rc; 2163 return rc;
@@ -2147,15 +2190,20 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2147void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie) 2190void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2148{ 2191{
2149 int chain; 2192 int chain;
2193 struct neigh_hash_table *nht;
2150 2194
2151 read_lock_bh(&tbl->lock); 2195 rcu_read_lock_bh();
2152 for (chain = 0; chain <= tbl->hash_mask; chain++) { 2196 nht = rcu_dereference_bh(tbl->nht);
2197
2198 read_lock(&tbl->lock);
2199 for (chain = 0; chain <= nht->hash_mask; chain++) {
2153 struct neighbour *n; 2200 struct neighbour *n;
2154 2201
2155 for (n = tbl->hash_buckets[chain]; n; n = n->next) 2202 for (n = nht->hash_buckets[chain]; n; n = n->next)
2156 cb(n, cookie); 2203 cb(n, cookie);
2157 } 2204 }
2158 read_unlock_bh(&tbl->lock); 2205 read_unlock(&tbl->lock);
2206 rcu_read_unlock_bh();
2159} 2207}
2160EXPORT_SYMBOL(neigh_for_each); 2208EXPORT_SYMBOL(neigh_for_each);
2161 2209
@@ -2164,11 +2212,14 @@ void __neigh_for_each_release(struct neigh_table *tbl,
2164 int (*cb)(struct neighbour *)) 2212 int (*cb)(struct neighbour *))
2165{ 2213{
2166 int chain; 2214 int chain;
2215 struct neigh_hash_table *nht;
2167 2216
2168 for (chain = 0; chain <= tbl->hash_mask; chain++) { 2217 nht = rcu_dereference_protected(tbl->nht,
2218 lockdep_is_held(&tbl->lock));
2219 for (chain = 0; chain <= nht->hash_mask; chain++) {
2169 struct neighbour *n, **np; 2220 struct neighbour *n, **np;
2170 2221
2171 np = &tbl->hash_buckets[chain]; 2222 np = &nht->hash_buckets[chain];
2172 while ((n = *np) != NULL) { 2223 while ((n = *np) != NULL) {
2173 int release; 2224 int release;
2174 2225
@@ -2193,13 +2244,13 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
2193{ 2244{
2194 struct neigh_seq_state *state = seq->private; 2245 struct neigh_seq_state *state = seq->private;
2195 struct net *net = seq_file_net(seq); 2246 struct net *net = seq_file_net(seq);
2196 struct neigh_table *tbl = state->tbl; 2247 struct neigh_hash_table *nht = state->nht;
2197 struct neighbour *n = NULL; 2248 struct neighbour *n = NULL;
2198 int bucket = state->bucket; 2249 int bucket = state->bucket;
2199 2250
2200 state->flags &= ~NEIGH_SEQ_IS_PNEIGH; 2251 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2201 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) { 2252 for (bucket = 0; bucket <= nht->hash_mask; bucket++) {
2202 n = tbl->hash_buckets[bucket]; 2253 n = nht->hash_buckets[bucket];
2203 2254
2204 while (n) { 2255 while (n) {
2205 if (!net_eq(dev_net(n->dev), net)) 2256 if (!net_eq(dev_net(n->dev), net))
@@ -2234,7 +2285,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
2234{ 2285{
2235 struct neigh_seq_state *state = seq->private; 2286 struct neigh_seq_state *state = seq->private;
2236 struct net *net = seq_file_net(seq); 2287 struct net *net = seq_file_net(seq);
2237 struct neigh_table *tbl = state->tbl; 2288 struct neigh_hash_table *nht = state->nht;
2238 2289
2239 if (state->neigh_sub_iter) { 2290 if (state->neigh_sub_iter) {
2240 void *v = state->neigh_sub_iter(state, n, pos); 2291 void *v = state->neigh_sub_iter(state, n, pos);
@@ -2265,10 +2316,10 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
2265 if (n) 2316 if (n)
2266 break; 2317 break;
2267 2318
2268 if (++state->bucket > tbl->hash_mask) 2319 if (++state->bucket > nht->hash_mask)
2269 break; 2320 break;
2270 2321
2271 n = tbl->hash_buckets[state->bucket]; 2322 n = nht->hash_buckets[state->bucket];
2272 } 2323 }
2273 2324
2274 if (n && pos) 2325 if (n && pos)
@@ -2367,6 +2418,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2367 2418
2368void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) 2419void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2369 __acquires(tbl->lock) 2420 __acquires(tbl->lock)
2421 __acquires(rcu_bh)
2370{ 2422{
2371 struct neigh_seq_state *state = seq->private; 2423 struct neigh_seq_state *state = seq->private;
2372 2424
@@ -2374,8 +2426,9 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
2374 state->bucket = 0; 2426 state->bucket = 0;
2375 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH); 2427 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2376 2428
2377 read_lock_bh(&tbl->lock); 2429 rcu_read_lock_bh();
2378 2430 state->nht = rcu_dereference_bh(tbl->nht);
2431 read_lock(&tbl->lock);
2379 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; 2432 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2380} 2433}
2381EXPORT_SYMBOL(neigh_seq_start); 2434EXPORT_SYMBOL(neigh_seq_start);
@@ -2409,11 +2462,13 @@ EXPORT_SYMBOL(neigh_seq_next);
2409 2462
2410void neigh_seq_stop(struct seq_file *seq, void *v) 2463void neigh_seq_stop(struct seq_file *seq, void *v)
2411 __releases(tbl->lock) 2464 __releases(tbl->lock)
2465 __releases(rcu_bh)
2412{ 2466{
2413 struct neigh_seq_state *state = seq->private; 2467 struct neigh_seq_state *state = seq->private;
2414 struct neigh_table *tbl = state->tbl; 2468 struct neigh_table *tbl = state->tbl;
2415 2469
2416 read_unlock_bh(&tbl->lock); 2470 read_unlock(&tbl->lock);
2471 rcu_read_unlock_bh();
2417} 2472}
2418EXPORT_SYMBOL(neigh_seq_stop); 2473EXPORT_SYMBOL(neigh_seq_stop);
2419 2474