Diffstat (limited to 'lib')
-rw-r--r--  lib/rhashtable.c   8
-rw-r--r--  lib/sbitmap.c     11
2 files changed, 17 insertions, 2 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 0a105d4af166..97f59abc3e92 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
 	else if (tbl->nest)
 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
 
-	if (!err)
-		err = rhashtable_rehash_table(ht);
+	if (!err || err == -EEXIST) {
+		int nerr;
+
+		nerr = rhashtable_rehash_table(ht);
+		err = err ?: nerr;
+	}
 
 	mutex_unlock(&ht->mutex);
 
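The rhashtable hunk above lets the deferred worker go ahead with rhashtable_rehash_table() even when the allocation step came back with -EEXIST, while still reporting the earlier error once the rehash has been attempted. Below is a minimal user-space sketch of that error-propagation pattern; first_step() and second_step() are hypothetical stand-ins for the two kernel calls, and the "err ?: nerr" form uses the GNU C conditional-operator extension that the kernel code itself relies on.

/* Sketch only: hypothetical stand-ins, not kernel code. */
#include <errno.h>
#include <stdio.h>

static int first_step(void)  { return -EEXIST; }	/* e.g. the thing already exists */
static int second_step(void) { return 0; }		/* the follow-up work */

int main(void)
{
	int err = first_step();

	/* Run the follow-up on success *or* on -EEXIST ... */
	if (!err || err == -EEXIST) {
		int nerr = second_step();

		/* ... but keep the first non-zero result, if any. */
		err = err ?: nerr;
	}

	printf("final err = %d\n", err);	/* prints the preserved -EEXIST */
	return 0;
}

Built with gcc or clang (both accept the ?: extension), this keeps -EEXIST as the reported error even though second_step() succeeded, mirroring how the hunk preserves the allocation error while still carrying out the rehash.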
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5b382c1244ed..155fe38756ec 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 			 unsigned int cpu)
 {
+	/*
+	 * Once the clear bit is set, the bit may be allocated out.
+	 *
+	 * Orders READ/WRITE on the associated instance (such as a request
+	 * of blk_mq) by this bit to avoid racing with re-allocation, and
+	 * its pair is the memory barrier implied in __sbitmap_get_word.
+	 *
+	 * One invariant is that the clear bit has to be zero when the bit
+	 * is in use.
+	 */
+	smp_mb__before_atomic();
 	sbitmap_deferred_clear_bit(&sbq->sb, nr);
 
 	/*
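The smp_mb__before_atomic() added above orders every prior access to the object that owned the bit before the clear bit can become visible, and the comment names its pair as the barrier implied in __sbitmap_get_word() on the allocation side. As a rough user-space analogy, assuming C11 atomics rather than the kernel primitives and with all names below invented for illustration, the same publish/claim ordering can be sketched as:

/* Illustrative analogy only: C11 atomics, not the kernel barriers. */
#include <stdatomic.h>
#include <stdbool.h>

struct tag_slot {
	int payload;		/* stands in for the request tied to the bit */
	atomic_bool freed;	/* stands in for the (deferred) clear bit */
};

static void release_tag(struct tag_slot *slot)
{
	slot->payload = 0;	/* last touch by the old owner */
	/*
	 * Release: the payload store cannot pass the flag store,
	 * playing the role of smp_mb__before_atomic() above.
	 */
	atomic_store_explicit(&slot->freed, true, memory_order_release);
}

static bool try_claim_tag(struct tag_slot *slot)
{
	/*
	 * Acquire: later payload accesses cannot move before the claim,
	 * playing the role of the barrier implied in __sbitmap_get_word().
	 */
	return atomic_exchange_explicit(&slot->freed, false,
					memory_order_acquire);
}

The invariant from the comment carries over to the analogy: freed must read as false (the clear bit must be zero) for as long as the slot is still in use by its owner.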