author	NeilBrown <neilb@suse.com>	2019-04-01 19:07:45 -0400
committer	David S. Miller <davem@davemloft.net>	2019-04-07 22:12:12 -0400
commit	149212f07856b25a9d342bfd6d736519b2ef66dc (patch)
tree	b2fc4e6c9902041bcdb4c8bfc44d2d5d3040088c /lib
parent	8f0db018006a421956965e1149234c4e8db718ee (diff)
rhashtable: add lockdep tracking to bucket bit-spin-locks.
Native bit_spin_locks are not tracked by lockdep.

The bit_spin_locks used for rhashtable buckets are local to the rhashtable implementation, so there is little opportunity for the sort of misuse that lockdep might detect. However, locks are held while a hash function or compare function is called, and if one of these took a lock, a misbehaviour would be possible. As it is quite easy to add lockdep support, this unlikely possibility seems to be justification enough.

So create a lockdep class for the bucket bit_spin_locks and attach it through a lockdep_map in each bucket_table.

Without the 'nested' annotation in rhashtable_rehash_one(), lockdep correctly reports a possible problem, as this lock is taken while another bucket lock (in another table) is held. This confirms that the added support works. With the correct nested annotation in place, lockdep reports no problems.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
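For context, a minimal sketch of the pattern the patch applies: one static lock_class_key shared by every table, a lockdep_map initialised against it at allocation time, and lock_map_acquire()/lock_map_release() wrapped around the raw bit_spin_lock. The names bucket_table_like, table_alloc, bucket_lock and bucket_unlock are illustrative stand-ins, not the actual rhashtable symbols:

/*
 * Illustrative only: bucket_table_like, table_alloc, bucket_lock and
 * bucket_unlock are stand-ins for the real rhashtable code.
 */
#include <linux/bit_spinlock.h>
#include <linux/lockdep.h>
#include <linux/mm.h>

struct bucket_table_like {
	struct lockdep_map dep_map;	/* one lockdep map per table */
	unsigned long buckets[];	/* bit 0 of each entry is the lock */
};

static struct bucket_table_like *table_alloc(unsigned int nbuckets, gfp_t gfp)
{
	/* A single static key puts all bucket locks in one lockdep class. */
	static struct lock_class_key __key;
	struct bucket_table_like *tbl;

	tbl = kvzalloc(sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]), gfp);
	if (!tbl)
		return NULL;
	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
	return tbl;
}

static void bucket_lock(struct bucket_table_like *tbl, unsigned long *bkt)
{
	bit_spin_lock(0, bkt);			/* the real lock */
	lock_map_acquire(&tbl->dep_map);	/* tell lockdep we hold it */
}

static void bucket_unlock(struct bucket_table_like *tbl, unsigned long *bkt)
{
	lock_map_release(&tbl->dep_map);	/* release in lockdep first */
	bit_spin_unlock(0, bkt);
}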
Diffstat (limited to 'lib')
-rw-r--r--	lib/rhashtable.c	15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index c5d0974467ee..a8583af43b59 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -173,6 +173,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	struct bucket_table *tbl = NULL;
 	size_t size;
 	int i;
+	static struct lock_class_key __key;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
 	tbl = kvzalloc(size, gfp);
@@ -187,6 +188,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	if (tbl == NULL)
 		return NULL;
 
+	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
+
 	tbl->size = size;
 
 	rcu_head_init(&tbl->rcu);
@@ -244,14 +247,14 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 
 	new_hash = head_hashfn(ht, new_tbl, entry);
 
-	rht_lock(&new_tbl->buckets[new_hash]);
+	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
 
 	head = rht_ptr(rht_dereference_bucket(new_tbl->buckets[new_hash],
 					      new_tbl, new_hash));
 
 	RCU_INIT_POINTER(entry->next, head);
 
-	rht_assign_unlock(&new_tbl->buckets[new_hash], entry);
+	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
 
 	if (pprev)
 		rcu_assign_pointer(*pprev, next);
@@ -272,14 +275,14 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 
 	if (!bkt)
 		return 0;
-	rht_lock(bkt);
+	rht_lock(old_tbl, bkt);
 
 	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
 		;
 
 	if (err == -ENOENT)
 		err = 0;
-	rht_unlock(bkt);
+	rht_unlock(old_tbl, bkt);
 
 	return err;
 }
@@ -600,7 +603,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 			data = ERR_PTR(-EAGAIN);
 		} else {
-			rht_lock(bkt);
+			rht_lock(tbl, bkt);
 			data = rhashtable_lookup_one(ht, bkt, tbl,
 						     hash, key, obj);
 			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
@@ -608,7 +611,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 			if (PTR_ERR(new_tbl) != -EEXIST)
 				data = ERR_CAST(new_tbl);
 
-			rht_unlock(bkt);
+			rht_unlock(tbl, bkt);
 		}
 	} while (!IS_ERR_OR_NULL(new_tbl));
 
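The header side of the change, where rht_lock(), rht_lock_nested() and rht_unlock() gain a bucket_table argument, lives in include/linux/rhashtable.h and is outside this 'lib'-limited diffstat. Based on the call sites above, the helpers plausibly look something like the following sketch (not the verbatim header diff):

static inline void rht_lock(struct bucket_table *tbl,
			    struct rhash_lock_head **bkt)
{
	local_bh_disable();
	bit_spin_lock(0, (unsigned long *)bkt);
	lock_map_acquire(&tbl->dep_map);
}

/*
 * During a rehash the old-table bucket lock is already held when the
 * new-table bucket lock is taken, and both share one lockdep class.
 * rhashtable_rehash_one() therefore uses this nested variant with
 * SINGLE_DEPTH_NESTING to tell lockdep the ordering is intentional.
 */
static inline void rht_lock_nested(struct bucket_table *tbl,
				   struct rhash_lock_head **bkt,
				   unsigned int subclass)
{
	local_bh_disable();
	bit_spin_lock(0, (unsigned long *)bkt);
	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
}

static inline void rht_unlock(struct bucket_table *tbl,
			      struct rhash_lock_head **bkt)
{
	lock_map_release(&tbl->dep_map);
	bit_spin_unlock(0, (unsigned long *)bkt);
	local_bh_enable();
}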