Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--  lib/sbitmap.c | 29 +++++++++++++++--------------
1 file changed, 15 insertions, 14 deletions
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index e6d7d610778d..6fdc6267f4a8 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -352,8 +352,9 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
         if (sbq->wake_batch != wake_batch) {
                 WRITE_ONCE(sbq->wake_batch, wake_batch);
                 /*
-                * Pairs with the memory barrier in sbq_wake_up() to ensure that
-                * the batch size is updated before the wait counts.
+                * Pairs with the memory barrier in sbitmap_queue_wake_up()
+                * to ensure that the batch size is updated before the wait
+                * counts.
                 */
                 smp_mb__before_atomic();
                 for (i = 0; i < SBQ_WAIT_QUEUES; i++)
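The ordering this hunk documents is a message-passing pattern: the updater publishes the new batch size before resetting the wait counts, so a wake path that observes a new wait count also observes the new batch. Below is a minimal userspace sketch of that pattern, with C11 fences standing in for the kernel's barriers; the names update_side() and wake_side() and the initial values are illustrative, not part of the patch.

#include <stdatomic.h>

static _Atomic unsigned int wake_batch = 8;
static _Atomic int wait_cnt = 8;

/* Updater: mirrors sbitmap_queue_update_wake_batch() above. */
static void update_side(unsigned int new_batch)
{
        atomic_store_explicit(&wake_batch, new_batch, memory_order_relaxed);
        /* smp_mb__before_atomic() analogue: the new batch is visible
         * before the new wait count. */
        atomic_thread_fence(memory_order_seq_cst);
        atomic_store_explicit(&wait_cnt, (int)new_batch, memory_order_relaxed);
}

/* Wake path: if it reads the new wait count, the fence guarantees it
 * also reads the new batch size. */
static int wake_side(void)
{
        int cnt = atomic_load_explicit(&wait_cnt, memory_order_relaxed);
        /* smp_mb__after_atomic() analogue in the wake path. */
        atomic_thread_fence(memory_order_seq_cst);
        unsigned int batch = atomic_load_explicit(&wake_batch,
                                                  memory_order_relaxed);
        return cnt <= 0 && batch > 0;
}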
@@ -463,15 +464,6 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
         unsigned int wake_batch;
         int wait_cnt;
 
-        /*
-         * Pairs with the memory barrier in set_current_state() to ensure the
-         * proper ordering of clear_bit()/waitqueue_active() in the waker and
-         * test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
-         * waiter. See the comment on waitqueue_active(). This is __after_atomic
-         * because we just did clear_bit_unlock() in the caller.
-         */
-        smp_mb__after_atomic();
-
         ws = sbq_wake_ptr(sbq);
         if (!ws)
                 return false;
@@ -507,17 +499,26 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
         return false;
 }
 
-static void sbq_wake_up(struct sbitmap_queue *sbq)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
 {
         while (__sbq_wake_up(sbq))
                 ;
 }
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
                          unsigned int cpu)
 {
         sbitmap_clear_bit_unlock(&sbq->sb, nr);
-        sbq_wake_up(sbq);
+        /*
+         * Pairs with the memory barrier in set_current_state() to ensure the
+         * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
+         * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
+         * waiter. See the comment on waitqueue_active().
+         */
+        smp_mb__after_atomic();
+        sbitmap_queue_wake_up(sbq);
+
         if (likely(!sbq->round_robin && nr < sbq->sb.depth))
                 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
 }
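Taken together with the previous hunk, the barrier is not removed but relocated: sbitmap_queue_clear() now issues smp_mb__after_atomic() between clear_bit_unlock() and the wake-up, and sbq_wake_up() becomes the exported sbitmap_queue_wake_up(). A caller outside lib/sbitmap.c that clears bits itself is therefore responsible for the same ordering before calling the exported helper. A hypothetical sketch of such a caller (my_free_tags() and its parameters are assumptions for illustration, not part of this patch):

/* Hypothetical batch-free path (sketch): clear several bits, then
 * order the clears against the waitqueue_active() checks inside the
 * exported wake-up helper, exactly as sbitmap_queue_clear() does. */
static void my_free_tags(struct sbitmap_queue *sbq,
                         const unsigned int *tags, unsigned int nr)
{
        unsigned int i;

        for (i = 0; i < nr; i++)
                sbitmap_clear_bit_unlock(&sbq->sb, tags[i]);

        /* Pairs with the memory barrier in set_current_state(). */
        smp_mb__after_atomic();
        sbitmap_queue_wake_up(sbq);
}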
@@ -529,7 +530,7 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
 
         /*
          * Pairs with the memory barrier in set_current_state() like in
-         * sbq_wake_up().
+         * sbitmap_queue_wake_up().
          */
         smp_mb();
         wake_index = atomic_read(&sbq->wake_index);
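Both comments point at the same waitqueue_active() idiom: the waiter publishes its state before re-checking the bitmap, and the waker makes its clear visible before checking for waiters, so at least one side is guaranteed to see the other. A userspace analogue of that store/fence/check pairing (a sketch only; all names are illustrative, and the actual sleeping and waking, e.g. via futexes, is left out):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool bit_busy = true;        /* the resource bit */
static atomic_bool waiter_queued = false;

/* Waker: clear_bit_unlock() + smp_mb__after_atomic() + waitqueue_active(). */
static bool release_and_check_waiters(void)
{
        atomic_store_explicit(&bit_busy, false, memory_order_release);
        /* smp_mb__after_atomic() analogue: the clear is visible before
         * we look for a queued waiter. */
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&waiter_queued, memory_order_relaxed);
}

/* Waiter: prepare_to_wait()/set_current_state() + test_and_set_bit_lock(). */
static bool must_sleep(void)
{
        atomic_store_explicit(&waiter_queued, true, memory_order_relaxed);
        /* set_current_state() analogue: publish the waiting state before
         * re-checking the bit. */
        atomic_thread_fence(memory_order_seq_cst);
        /* Either we grab the freed bit here, or the waker already saw
         * waiter_queued == true and will wake us. */
        return atomic_exchange_explicit(&bit_busy, true, memory_order_acquire);
}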