author     Herbert Xu <herbert@gondor.apana.org.au>    2019-05-16 03:19:46 -0400
committer  David S. Miller <davem@davemloft.net>       2019-05-16 12:45:20 -0400
commit     ba6306e3f648a857ae52ddcabc2859542fd2f94c
tree       8d9582c2d800f1a7cfa3daef5afb67f263ead579
parent     c7d5ec26ea4adf450d9ab2b794e7735761a93af1
rhashtable: Remove RCU marking from rhash_lock_head
The opaque type rhash_lock_head should not be marked with __rcu
because it can never be dereferenced. We should apply the RCU
marking when we turn it into a pointer which can be dereferenced.
This patch does exactly that. This fixes a number of sparse
warnings and gets rid of some unnecessary RCU checking.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
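
The pattern at the heart of this patch: bit 0 of each bucket word doubles as a
per-bucket spinlock, so the value stored in a bucket is never a valid address
on its own. It only becomes dereferenceable once the lock bit is masked off,
and that conversion point is where the __rcu marking (and the eventual
rcu_dereference) belongs. Below is a minimal sketch of the idea with
simplified types and a hypothetical helper name (the patch's real helper,
added in the first file, is __rht_ptr):

/*
 * Sketch only -- simplified from the kernel's rhashtable.h.
 * The bucket slot packs a lock into bit 0 of the pointer, so the raw
 * slot value must never be dereferenced.  Strip the bit first and
 * attach the __rcu marking to the resulting rhash_head pointer.
 */
struct rhash_head;              /* chain link: dereferenceable */
struct rhash_lock_head;         /* opaque bucket word: never dereferenced */

static inline struct rhash_head __rcu *bucket_to_head(
        struct rhash_lock_head *const *bkt)
{
        /* Mask off the lock bit; the caller then applies the
         * appropriate RCU primitive to the returned __rcu pointer. */
        return (struct rhash_head __rcu *)((unsigned long)*bkt & ~1UL);
}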
-rw-r--r--  include/linux/rhashtable.h  58
-rw-r--r--  lib/rhashtable.c            28
2 files changed, 46 insertions(+), 40 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index f7714d3b46bd..9f8bc06d4136 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -84,7 +84,7 @@ struct bucket_table {
 
         struct lockdep_map dep_map;
 
-        struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+        struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /*
@@ -261,13 +261,13 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                  void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
-struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-                                                 unsigned int hash);
-struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
-                                                   unsigned int hash);
-struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-                                                        struct bucket_table *tbl,
-                                                        unsigned int hash);
+struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
+                                           unsigned int hash);
+struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
+                                             unsigned int hash);
+struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
+                                                  struct bucket_table *tbl,
+                                                  unsigned int hash);
 
 #define rht_dereference(p, ht) \
         rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,21 +284,21 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
 ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
-static inline struct rhash_lock_head __rcu *const *rht_bucket(
+static inline struct rhash_lock_head *const *rht_bucket(
         const struct bucket_table *tbl, unsigned int hash)
 {
         return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
                                      &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head __rcu **rht_bucket_var(
+static inline struct rhash_lock_head **rht_bucket_var(
         struct bucket_table *tbl, unsigned int hash)
 {
         return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
                                      &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head __rcu **rht_bucket_insert(
+static inline struct rhash_lock_head **rht_bucket_insert(
         struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
         return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -349,6 +349,12 @@ static inline void rht_unlock(struct bucket_table *tbl,
         local_bh_enable();
 }
 
+static inline struct rhash_head __rcu *__rht_ptr(
+        struct rhash_lock_head *const *bkt)
+{
+        return (struct rhash_head __rcu *)((unsigned long)*bkt & ~BIT(0));
+}
+
 /*
  * Where 'bkt' is a bucket and might be locked:
  *   rht_ptr() dereferences that pointer and clears the lock bit.
@@ -356,30 +362,30 @@ static inline void rht_unlock(struct bucket_table *tbl,
  *   access is guaranteed, such as when destroying the table.
  */
 static inline struct rhash_head *rht_ptr(
-        struct rhash_lock_head __rcu * const *bkt,
+        struct rhash_lock_head *const *bkt,
         struct bucket_table *tbl,
         unsigned int hash)
 {
-        const struct rhash_lock_head *p =
-                rht_dereference_bucket_rcu(*bkt, tbl, hash);
+        struct rhash_head __rcu *p = __rht_ptr(bkt);
 
-        if ((((unsigned long)p) & ~BIT(0)) == 0)
+        if (!p)
                 return RHT_NULLS_MARKER(bkt);
-        return (void *)(((unsigned long)p) & ~BIT(0));
+
+        return rht_dereference_bucket_rcu(p, tbl, hash);
 }
 
 static inline struct rhash_head *rht_ptr_exclusive(
-        struct rhash_lock_head __rcu * const *bkt)
+        struct rhash_lock_head *const *bkt)
 {
-        const struct rhash_lock_head *p =
-                rcu_dereference_protected(*bkt, 1);
+        struct rhash_head __rcu *p = __rht_ptr(bkt);
 
         if (!p)
                 return RHT_NULLS_MARKER(bkt);
-        return (void *)(((unsigned long)p) & ~BIT(0));
+
+        return rcu_dereference_protected(p, 1);
 }
 
-static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+static inline void rht_assign_locked(struct rhash_lock_head **bkt,
                                      struct rhash_head *obj)
 {
         struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
@@ -390,7 +396,7 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
 }
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
-                                     struct rhash_lock_head __rcu **bkt,
+                                     struct rhash_lock_head **bkt,
                                      struct rhash_head *obj)
 {
         struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
@@ -587,7 +593,7 @@ static inline struct rhash_head *__rhashtable_lookup(
                 .ht = ht,
                 .key = key,
         };
-        struct rhash_lock_head __rcu * const *bkt;
+        struct rhash_lock_head *const *bkt;
         struct bucket_table *tbl;
         struct rhash_head *he;
         unsigned int hash;
@@ -703,7 +709,7 @@ static inline void *__rhashtable_insert_fast(
                 .ht = ht,
                 .key = key,
         };
-        struct rhash_lock_head __rcu **bkt;
+        struct rhash_lock_head **bkt;
         struct rhash_head __rcu **pprev;
         struct bucket_table *tbl;
         struct rhash_head *head;
@@ -989,7 +995,7 @@ static inline int __rhashtable_remove_fast_one(
         struct rhash_head *obj, const struct rhashtable_params params,
         bool rhlist)
 {
-        struct rhash_lock_head __rcu **bkt;
+        struct rhash_lock_head **bkt;
         struct rhash_head __rcu **pprev;
         struct rhash_head *he;
         unsigned int hash;
@@ -1141,7 +1147,7 @@ static inline int __rhashtable_replace_fast(
         struct rhash_head *obj_old, struct rhash_head *obj_new,
         const struct rhashtable_params params)
 {
-        struct rhash_lock_head __rcu **bkt;
+        struct rhash_lock_head **bkt;
         struct rhash_head __rcu **pprev;
         struct rhash_head *he;
         unsigned int hash;
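
To see how the reordering plays out for a reader, here is a hedged sketch of a
bucket walk using the helpers declared above: rht_ptr() now performs the single
RCU dereference on the already-stripped value, and the chain is then followed
with the usual bucket dereference helper. The function below is illustrative
only (key comparison elided, name invented) and assumes the caller holds
rcu_read_lock():

/* Illustrative bucket walk under RCU; not part of the patch. */
static struct rhash_head *sketch_bucket_walk(struct bucket_table *tbl,
                                             unsigned int hash)
{
        struct rhash_head *he;

        for (he = rht_ptr(rht_bucket(tbl, hash), tbl, hash);
             !rht_is_a_nulls(he);
             he = rht_dereference_bucket_rcu(he->next, tbl, hash)) {
                /* compare this entry's key here; return he on a match */
        }

        return NULL;
}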
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6529fe1b45c1..7708699a5b96 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -34,7 +34,7 @@
 
 union nested_table {
         union nested_table __rcu *table;
-        struct rhash_lock_head __rcu *bucket;
+        struct rhash_lock_head *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -216,7 +216,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht,
-                                 struct rhash_lock_head __rcu **bkt,
+                                 struct rhash_lock_head **bkt,
                                  unsigned int old_hash)
 {
         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -269,7 +269,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
                                    unsigned int old_hash)
 {
         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-        struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+        struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
         int err;
 
         if (!bkt)
@@ -478,7 +478,7 @@ fail:
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
-                                   struct rhash_lock_head __rcu **bkt,
+                                   struct rhash_lock_head **bkt,
                                    struct bucket_table *tbl, unsigned int hash,
                                    const void *key, struct rhash_head *obj)
 {
@@ -529,7 +529,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 }
 
 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
-                                                  struct rhash_lock_head __rcu **bkt,
+                                                  struct rhash_lock_head **bkt,
                                                   struct bucket_table *tbl,
                                                   unsigned int hash,
                                                   struct rhash_head *obj,
@@ -584,7 +584,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
         struct bucket_table *new_tbl;
         struct bucket_table *tbl;
-        struct rhash_lock_head __rcu **bkt;
+        struct rhash_lock_head **bkt;
         unsigned int hash;
         void *data;
 
@@ -1166,8 +1166,8 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
-                                                   unsigned int hash)
+struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
+                                             unsigned int hash)
 {
         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
         unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1195,10 +1195,10 @@ struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tb
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-                                                 unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
+                                           unsigned int hash)
 {
-        static struct rhash_lock_head __rcu *rhnull;
+        static struct rhash_lock_head *rhnull;
 
         if (!rhnull)
                 INIT_RHT_NULLS_HEAD(rhnull);
@@ -1206,9 +1206,9 @@ struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-                                                        struct bucket_table *tbl,
-                                                        unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
+                                                  struct bucket_table *tbl,
+                                                  unsigned int hash)
 {
         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
         unsigned int index = hash & ((1 << tbl->nest) - 1);