author     Jens Axboe <axboe@kernel.dk>  2018-11-29 19:36:41 -0500
committer  Jens Axboe <axboe@kernel.dk>  2018-11-30 16:48:04 -0500
commit     5d2ee7122c73be6a3b6bfe90d237e8aed737cfaa (patch)
tree       ada2bd06a0523a6ab8a9eae652034e2b0cb7b996 /lib/sbitmap.c
parent     ea86ea2cdced20057da4d2c32965c1219c238197 (diff)
sbitmap: optimize wakeup check
Even if we have no waiters on any of the sbitmap_queue wait states, we
still have to loop over every one of them to check. We do this for every
IO, so the cost adds up.

Shift a bit of the cost to the slow path, when we actually have waiters.
Wrap prepare_to_wait_exclusive() and finish_wait(), so we can maintain an
internal count of how many waiters are currently active. Then we can
simply check this count in sbq_wake_ptr() and not have to loop if we
don't have any sleepers.

Convert the two users of sbitmap with waiting, blk-mq-tag and iSCSI.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
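The blk-mq-tag and iSCSI conversions are not part of this diff (the diffstat
is limited to lib/sbitmap.c), so below is a minimal sketch of how a caller
might use the new wrappers in place of calling prepare_to_wait_exclusive()
and finish_wait() directly. The example_get_tag() helper, the wait-state
selection, and the retry policy are illustrative assumptions, not code from
this patch; DEFINE_SBQ_WAIT() is assumed to be the companion on-stack
initializer for struct sbq_wait.

#include <linux/sbitmap.h>
#include <linux/sched.h>

/* Illustrative waiter loop in the style of blk-mq-tag; not from the patch. */
static int example_get_tag(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);	/* assumed initializer for struct sbq_wait */
	int nr;

	nr = __sbitmap_queue_get(sbq);
	if (nr != -1)
		return nr;

	/* Pick a wait state; real callers use sbq_wait_ptr() with a per-context index. */
	ws = &sbq->ws[0];
	for (;;) {
		/*
		 * The first call bumps sbq->ws_active, so sbq_wake_ptr()
		 * no longer bails out early while we are sleeping.
		 */
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		nr = __sbitmap_queue_get(sbq);
		if (nr != -1)
			break;
		io_schedule();
	}
	/* Drops the ws_active count again once we stop waiting. */
	sbitmap_finish_wait(sbq, ws, &wait);
	return nr;
}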
Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--  lib/sbitmap.c | 28
1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index f99382e59314..a89fbe7cf6ca 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -394,6 +394,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 	sbq->min_shallow_depth = UINT_MAX;
 	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
 	atomic_set(&sbq->wake_index, 0);
+	atomic_set(&sbq->ws_active, 0);
 
 	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
 	if (!sbq->ws) {
@@ -509,6 +510,9 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 {
 	int i, wake_index;
 
+	if (!atomic_read(&sbq->ws_active))
+		return NULL;
+
 	wake_index = atomic_read(&sbq->wake_index);
 	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
 		struct sbq_wait_state *ws = &sbq->ws[wake_index];
@@ -634,6 +638,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
 
 	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
 	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
+	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
 
 	seq_puts(m, "ws={\n");
 	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
@@ -649,3 +654,26 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
 	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_show);
+
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+			     struct sbq_wait_state *ws,
+			     struct sbq_wait *sbq_wait, int state)
+{
+	if (!sbq_wait->accounted) {
+		atomic_inc(&sbq->ws_active);
+		sbq_wait->accounted = 1;
+	}
+	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
+}
+EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
+
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+			 struct sbq_wait *sbq_wait)
+{
+	finish_wait(&ws->wait, &sbq_wait->wait);
+	if (sbq_wait->accounted) {
+		atomic_dec(&sbq->ws_active);
+		sbq_wait->accounted = 0;
+	}
+}
+EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
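The companion header change is not shown here, since the diffstat is limited
to lib/sbitmap.c. The sketch below shows what the wrappers above appear to
rely on in include/linux/sbitmap.h, inferred only from the ->accounted and
->wait accesses in the diff; the exact field layout and the initializer are
assumptions, not copied from the patch.

#include <linux/wait.h>

/* Assumed shape of the per-waiter bookkeeping used by the wrappers. */
struct sbq_wait {
	int accounted;			/* set once sbq->ws_active has been bumped */
	struct wait_queue_entry wait;	/* entry hung off ws->wait */
};

/* Assumed on-stack initializer, mirroring DEFINE_WAIT() for plain wait queues. */
#define DEFINE_SBQ_WAIT(name)						\
	struct sbq_wait name = {					\
		.accounted = 0,						\
		.wait = {						\
			.private	= current,			\
			.func		= autoremove_wake_function,	\
			.entry		= LIST_HEAD_INIT((name).wait.entry), \
		},							\
	}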