aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorJulian Anastasov <ja@ssi.bg>2006-10-03 18:49:46 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2006-10-04 03:31:01 -0400
commitc5e29460f5f9eb189cab5d9fdaa137e64f7734b6 (patch)
treee498a4b18033c79b1c7cb4aa3ba35b5c185a3676 /net
parent1e0c14f49d6b393179f423abbac47f85618d3d46 (diff)
[NEIGH]: always use hash_mask under tbl lock
Make sure hash_mask is protected with tbl->lock in all cases, just like the hash_buckets.

Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/core/neighbour.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8ce8c471d868..b4b478353b27 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -344,12 +344,12 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
344{ 344{
345 struct neighbour *n; 345 struct neighbour *n;
346 int key_len = tbl->key_len; 346 int key_len = tbl->key_len;
347 u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask; 347 u32 hash_val = tbl->hash(pkey, dev);
348 348
349 NEIGH_CACHE_STAT_INC(tbl, lookups); 349 NEIGH_CACHE_STAT_INC(tbl, lookups);
350 350
351 read_lock_bh(&tbl->lock); 351 read_lock_bh(&tbl->lock);
352 for (n = tbl->hash_buckets[hash_val]; n; n = n->next) { 352 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
353 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) { 353 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
354 neigh_hold(n); 354 neigh_hold(n);
355 NEIGH_CACHE_STAT_INC(tbl, hits); 355 NEIGH_CACHE_STAT_INC(tbl, hits);
@@ -364,12 +364,12 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
364{ 364{
365 struct neighbour *n; 365 struct neighbour *n;
366 int key_len = tbl->key_len; 366 int key_len = tbl->key_len;
367 u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask; 367 u32 hash_val = tbl->hash(pkey, NULL);
368 368
369 NEIGH_CACHE_STAT_INC(tbl, lookups); 369 NEIGH_CACHE_STAT_INC(tbl, lookups);
370 370
371 read_lock_bh(&tbl->lock); 371 read_lock_bh(&tbl->lock);
372 for (n = tbl->hash_buckets[hash_val]; n; n = n->next) { 372 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
373 if (!memcmp(n->primary_key, pkey, key_len)) { 373 if (!memcmp(n->primary_key, pkey, key_len)) {
374 neigh_hold(n); 374 neigh_hold(n);
375 NEIGH_CACHE_STAT_INC(tbl, hits); 375 NEIGH_CACHE_STAT_INC(tbl, hits);
@@ -1998,12 +1998,12 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1998 int rc, h, s_h = cb->args[1]; 1998 int rc, h, s_h = cb->args[1];
1999 int idx, s_idx = idx = cb->args[2]; 1999 int idx, s_idx = idx = cb->args[2];
2000 2000
2001 read_lock_bh(&tbl->lock);
2001 for (h = 0; h <= tbl->hash_mask; h++) { 2002 for (h = 0; h <= tbl->hash_mask; h++) {
2002 if (h < s_h) 2003 if (h < s_h)
2003 continue; 2004 continue;
2004 if (h > s_h) 2005 if (h > s_h)
2005 s_idx = 0; 2006 s_idx = 0;
2006 read_lock_bh(&tbl->lock);
2007 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) { 2007 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
2008 if (idx < s_idx) 2008 if (idx < s_idx)
2009 continue; 2009 continue;
@@ -2016,8 +2016,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2016 goto out; 2016 goto out;
2017 } 2017 }
2018 } 2018 }
2019 read_unlock_bh(&tbl->lock);
2020 } 2019 }
2020 read_unlock_bh(&tbl->lock);
2021 rc = skb->len; 2021 rc = skb->len;
2022out: 2022out:
2023 cb->args[1] = h; 2023 cb->args[1] = h;