author     Herbert Xu <herbert@gondor.apana.org.au>   2015-03-13 22:57:21 -0400
committer  David S. Miller <davem@davemloft.net>      2015-03-15 01:35:34 -0400
commit     8f2484bdb55daa53ecaddb5fa4c298e3d262b69e
tree       4d27176d889d22df4587fd4e94ff85d406409b8a
parent     eddee5ba34eb6c9890ef106f19ead2b370e5342f
rhashtable: Use SINGLE_DEPTH_NESTING
We only nest one level deep, so there is no need to roll our own
subclasses.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
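For context, here is a minimal sketch of the locking pattern the patch relies on, assuming a simplified table with per-bucket spinlocks; the struct demo_table, demo_move_entry() and locks[] names are illustrative only and are not part of rhashtable. When two bucket locks of the same lock class must be held at once during a rehash, the inner lock is taken with spin_lock_nested() and lockdep's stock SINGLE_DEPTH_NESTING subclass, instead of a hand-rolled enum such as RHT_LOCK_NESTED.

```c
#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Illustrative stand-in for a hash table with per-bucket locks. */
struct demo_table {
	spinlock_t locks[8];
};

/*
 * Move an entry from a bucket in the old table to a bucket in the new
 * table.  Both locks belong to the same lock class, and we only ever
 * nest one level deep, so the generic SINGLE_DEPTH_NESTING subclass is
 * enough to keep lockdep from reporting a false self-deadlock.
 */
static void demo_move_entry(struct demo_table *old_tbl,
			    struct demo_table *new_tbl,
			    unsigned int old_hash, unsigned int new_hash)
{
	spin_lock(&old_tbl->locks[old_hash]);
	spin_lock_nested(&new_tbl->locks[new_hash], SINGLE_DEPTH_NESTING);

	/* ... relink the entry from the old bucket to the new bucket ... */

	spin_unlock(&new_tbl->locks[new_hash]);
	spin_unlock(&old_tbl->locks[old_hash]);
}
```

When lockdep (CONFIG_DEBUG_LOCK_ALLOC) is disabled, the subclass argument is compiled away and spin_lock_nested() behaves exactly like spin_lock(), so the annotation carries no runtime cost.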
-rw-r--r--  lib/rhashtable.c | 9
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index f7c76079f8f1..5d06cc2b1e4a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -33,11 +33,6 @@
 /* Base bits plus 1 bit for nulls marker */
 #define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
 
-enum {
-	RHT_LOCK_NORMAL,
-	RHT_LOCK_NESTED,
-};
-
 /* The bucket lock is selected based on the hash and protects mutations
  * on a group of hash buckets.
  *
@@ -231,7 +226,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
 
 	new_bucket_lock = bucket_lock(new_tbl, new_hash);
 
-	spin_lock_nested(new_bucket_lock, RHT_LOCK_NESTED);
+	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
 	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
 				      new_tbl, new_hash);
 
@@ -405,7 +400,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	tbl = rht_dereference_rcu(ht->future_tbl, ht);
 	if (tbl != old_tbl) {
 		hash = head_hashfn(ht, tbl, obj);
-		spin_lock_nested(bucket_lock(tbl, hash), RHT_LOCK_NESTED);
+		spin_lock_nested(bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
 	}
 
 	if (compare &&