diff options
author | Thomas Graf <tgraf@suug.ch> | 2015-01-12 18:58:21 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-01-14 00:21:44 -0500 |
commit | 80ca8c3a84c74a87977558861bb8eef650732912 (patch) | |
tree | e275dc10e62e28ea8b7f4c25e6a30664974e233f /lib/rhashtable.c | |
parent | df8a39defad46b83694ea6dd868d332976d62cc0 (diff) |
rhashtable: Lower/upper bucket may map to same lock while shrinking
Each per bucket lock covers a configurable number of buckets. While
shrinking, two buckets in the old table contain entries for a single
bucket in the new table. We need to lock down both while linking.
Check if they are protected by different locks to avoid a recursive
lock.
Fixes: 97defe1e ("rhashtable: Per bucket locks & deferred expansion/shrinking")
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r-- | lib/rhashtable.c | 15 |
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index ed6ae1ad304c..aca699813ba9 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -443,8 +443,16 @@ int rhashtable_shrink(struct rhashtable *ht) | |||
443 | new_bucket_lock = bucket_lock(new_tbl, new_hash); | 443 | new_bucket_lock = bucket_lock(new_tbl, new_hash); |
444 | 444 | ||
445 | spin_lock_bh(old_bucket_lock1); | 445 | spin_lock_bh(old_bucket_lock1); |
446 | spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED); | 446 | |
447 | spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2); | 447 | /* Depending on the lock per buckets mapping, the bucket in |
448 | * the lower and upper region may map to the same lock. | ||
449 | */ | ||
450 | if (old_bucket_lock1 != old_bucket_lock2) { | ||
451 | spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED); | ||
452 | spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2); | ||
453 | } else { | ||
454 | spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); | ||
455 | } | ||
448 | 456 | ||
449 | rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), | 457 | rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), |
450 | tbl->buckets[new_hash]); | 458 | tbl->buckets[new_hash]); |
@@ -452,7 +460,8 @@ int rhashtable_shrink(struct rhashtable *ht) | |||
452 | tbl->buckets[new_hash + new_tbl->size]); | 460 | tbl->buckets[new_hash + new_tbl->size]); |
453 | 461 | ||
454 | spin_unlock_bh(new_bucket_lock); | 462 | spin_unlock_bh(new_bucket_lock); |
455 | spin_unlock_bh(old_bucket_lock2); | 463 | if (old_bucket_lock1 != old_bucket_lock2) |
464 | spin_unlock_bh(old_bucket_lock2); | ||
456 | spin_unlock_bh(old_bucket_lock1); | 465 | spin_unlock_bh(old_bucket_lock1); |
457 | } | 466 | } |
458 | 467 | ||