author		Omar Sandoval <osandov@fb.com>	2018-02-27 19:56:43 -0500
committer	Jens Axboe <axboe@kernel.dk>	2018-02-28 14:23:35 -0500
commit		4ace53f1ed40a5cfee4bdd7614c8a8b2798227ad (patch)
tree		c766967b9ac9c5ae7a04035c0fa182fb27167d33
parent		e9a99a638800af25c7ed006c96fd1dabb99254b7 (diff)
sbitmap: use test_and_set_bit_lock()/clear_bit_unlock()
sbitmap_queue_get()/sbitmap_queue_clear() are used for allocating/freeing
a resource, so they should provide acquire/release barrier semantics,
respectively. sbitmap_get() currently contains a full barrier, which is
unnecessary, so use test_and_set_bit_lock() instead of test_and_set_bit()
(these are equivalent on x86_64). sbitmap_clear_bit() does not imply any
barriers, which is incorrect, as accesses of the resource (e.g., request)
could potentially get reordered to after the clear_bit(). Introduce
sbitmap_clear_bit_unlock() and use it for sbitmap_queue_clear() (this
only adds a compiler barrier on x86_64). The other existing user of
sbitmap_clear_bit() (the blk-mq software queue pending map) is serialized
through a spinlock and does not need this.

Reported-by: Tejun Heo <tj@kernel.org>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
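The ordering requirement is easiest to see in a minimal userspace C11
sketch (not part of this patch; the names word, res, get_bit() and
put_bit() are illustrative stand-ins for one sbitmap word and its
callers):

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-ins, not kernel code: one bitmap word guarding a
 * small pool of resources. */
struct resource { int data; };
static atomic_ulong word;
static struct resource res[64];

/* Allocate bit nr with acquire semantics, like test_and_set_bit_lock():
 * on success, later accesses of res[nr] cannot be reordered before the
 * set. */
static bool get_bit(unsigned int nr)
{
	unsigned long old = atomic_fetch_or_explicit(&word, 1UL << nr,
						     memory_order_acquire);
	return !(old & (1UL << nr));
}

/* Free bit nr with release semantics, like clear_bit_unlock(): earlier
 * stores to res[nr] become visible before the bit appears clear. */
static void put_bit(unsigned int nr)
{
	atomic_fetch_and_explicit(&word, ~(1UL << nr),
				  memory_order_release);
}

On a weakly ordered architecture, a relaxed clear in put_bit() would let
the store to res[nr].data drift past the clear, so the next get_bit()
winner could observe stale data; x86_64 hardware already orders these,
which is why the patch only costs a compiler barrier there.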
-rw-r--r--	include/linux/sbitmap.h	8
-rw-r--r--	lib/sbitmap.c		10
2 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 0dcc60e820de..841585f6e5f2 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -171,6 +171,8 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
  *               starting from the last allocated bit. This is less efficient
  *               than the default behavior (false).
  *
+ * This operation provides acquire barrier semantics if it succeeds.
+ *
  * Return: Non-negative allocated bit number if successful, -1 otherwise.
  */
 int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
@@ -300,6 +302,12 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
 	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
 }
 
+static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
+					    unsigned int bitnr)
+{
+	clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+}
+
 static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
 {
 	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 42b5ca0acf93..e6a9c06ec70c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -100,7 +100,7 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
 			return -1;
 		}
 
-		if (!test_and_set_bit(nr, word))
+		if (!test_and_set_bit_lock(nr, word))
 			break;
 
 		hint = nr + 1;
@@ -434,9 +434,9 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
 	/*
 	 * Pairs with the memory barrier in set_current_state() to ensure the
 	 * proper ordering of clear_bit()/waitqueue_active() in the waker and
-	 * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See
-	 * the comment on waitqueue_active(). This is __after_atomic because we
-	 * just did clear_bit() in the caller.
+	 * test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
+	 * waiter. See the comment on waitqueue_active(). This is __after_atomic
+	 * because we just did clear_bit_unlock() in the caller.
 	 */
 	smp_mb__after_atomic();
 
@@ -469,7 +469,7 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 			 unsigned int cpu)
 {
-	sbitmap_clear_bit(&sbq->sb, nr);
+	sbitmap_clear_bit_unlock(&sbq->sb, nr);
 	sbq_wake_up(sbq);
 	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
 		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
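Taken together, sbitmap_queue_get()/sbitmap_queue_clear() now bracket the
per-bit resource like a lock. A caller-side sketch of that pattern
(struct my_resource, pool and my_use_one() are hypothetical; the sbitmap
calls are the real API touched by this patch):

#include <linux/sbitmap.h>

/* Hypothetical per-bit resource; in blk-mq this would be a request. */
struct my_resource {
	int state;
};

static void my_use_one(struct sbitmap_queue *sbq, struct my_resource *pool)
{
	unsigned int cpu;
	int nr = sbitmap_queue_get(sbq, &cpu);	/* acquire on success */

	if (nr < 0)
		return;

	pool[nr].state = 1;	/* cannot be reordered before the set */
	/* ... use pool[nr] ... */
	pool[nr].state = 0;	/* cannot be reordered after the clear */
	sbitmap_queue_clear(sbq, nr, cpu);	/* release */
}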