summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--block/blk-mq-tag.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c12
-rw-r--r--include/linux/sbitmap.h34
-rw-r--r--lib/sbitmap.c28
4 files changed, 74 insertions, 11 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 87bc5df72d48..2089c6c62f44 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -110,7 +110,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
110 struct blk_mq_tags *tags = blk_mq_tags_from_data(data); 110 struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
111 struct sbitmap_queue *bt; 111 struct sbitmap_queue *bt;
112 struct sbq_wait_state *ws; 112 struct sbq_wait_state *ws;
113 DEFINE_WAIT(wait); 113 DEFINE_SBQ_WAIT(wait);
114 unsigned int tag_offset; 114 unsigned int tag_offset;
115 bool drop_ctx; 115 bool drop_ctx;
116 int tag; 116 int tag;
@@ -154,8 +154,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
154 if (tag != -1) 154 if (tag != -1)
155 break; 155 break;
156 156
157 prepare_to_wait_exclusive(&ws->wait, &wait, 157 sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
158 TASK_UNINTERRUPTIBLE);
159 158
160 tag = __blk_mq_get_tag(data, bt); 159 tag = __blk_mq_get_tag(data, bt);
161 if (tag != -1) 160 if (tag != -1)
@@ -167,6 +166,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
167 bt_prev = bt; 166 bt_prev = bt;
168 io_schedule(); 167 io_schedule();
169 168
169 sbitmap_finish_wait(bt, ws, &wait);
170
170 data->ctx = blk_mq_get_ctx(data->q); 171 data->ctx = blk_mq_get_ctx(data->q);
171 data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, 172 data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
172 data->ctx->cpu); 173 data->ctx->cpu);
@@ -176,8 +177,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
176 else 177 else
177 bt = &tags->bitmap_tags; 178 bt = &tags->bitmap_tags;
178 179
179 finish_wait(&ws->wait, &wait);
180
181 /* 180 /*
182 * If destination hw queue is changed, fake wake up on 181 * If destination hw queue is changed, fake wake up on
183 * previous queue for compensating the wake up miss, so 182 * previous queue for compensating the wake up miss, so
@@ -192,7 +191,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
192 if (drop_ctx && data->ctx) 191 if (drop_ctx && data->ctx)
193 blk_mq_put_ctx(data->ctx); 192 blk_mq_put_ctx(data->ctx);
194 193
195 finish_wait(&ws->wait, &wait); 194 sbitmap_finish_wait(bt, ws, &wait);
196 195
197found_tag: 196found_tag:
198 return tag + tag_offset; 197 return tag + tag_offset;
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 36b742932c72..86987da86dd6 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -150,24 +150,26 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
/*
 * Block (exclusively) on the session tag pool until a tag can be
 * allocated.  Uses the sbitmap wait helpers so the pool's ws_active
 * accounting stays consistent with the waiters actually queued.
 *
 * @se_sess: session whose sess_tag_pool is allocated from
 * @state:   task state to sleep in (e.g. TASK_INTERRUPTIBLE);
 *           TASK_RUNNING means "don't sleep" and returns -1 immediately
 * @cpup:    passed through to sbitmap_queue_get() (CPU hint out-param)
 *
 * Returns the allocated tag, or -1 if the caller refused to sleep or a
 * signal interrupted the wait.
 */
static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
{
	int tag = -1;
	DEFINE_SBQ_WAIT(wait);
	struct sbq_wait_state *ws;
	struct sbitmap_queue *sbq;

	if (state == TASK_RUNNING)
		return tag;

	sbq = &se_sess->sess_tag_pool;
	ws = &sbq->ws[0];
	for (;;) {
		/* Queue ourselves as a waiter before re-trying the allocation */
		sbitmap_prepare_to_wait(sbq, ws, &wait, state);
		if (signal_pending_state(state, current))
			break;
		tag = sbitmap_queue_get(sbq, cpup);
		if (tag >= 0)
			break;
		schedule();
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	return tag;
}
173 175
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 81359d45751e..92806a2dbab7 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -135,6 +135,11 @@ struct sbitmap_queue {
135 */ 135 */
136 struct sbq_wait_state *ws; 136 struct sbq_wait_state *ws;
137 137
138 /*
139 * @ws_active: count of currently active ws waitqueues
140 */
141 atomic_t ws_active;
142
138 /** 143 /**
139 * @round_robin: Allocate bits in strict round-robin order. 144 * @round_robin: Allocate bits in strict round-robin order.
140 */ 145 */
@@ -552,4 +557,33 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
552 */ 557 */
553void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m); 558void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
554 559
/*
 * Wait-queue entry for sbitmap_queue waiters.  @accounted tracks whether
 * this entry has already been counted in sbq->ws_active: set by
 * sbitmap_prepare_to_wait(), cleared by sbitmap_finish_wait(), so that
 * re-arming the same entry in a loop only bumps the counter once.
 */
struct sbq_wait {
	int accounted;
	struct wait_queue_entry wait;
};

/* Static initializer for an on-stack sbq_wait, waking the current task. */
#define DEFINE_SBQ_WAIT(name)					\
	struct sbq_wait name = {				\
		.accounted = 0,					\
		.wait = {					\
			.private	= current,		\
			.func		= autoremove_wake_function,	\
			.entry		= LIST_HEAD_INIT((name).wait.entry),	\
		}						\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state (the sbq->ws_active waiter count).
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);
588
555#endif /* __LINUX_SCALE_BITMAP_H */ 589#endif /* __LINUX_SCALE_BITMAP_H */
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index f99382e59314..a89fbe7cf6ca 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -394,6 +394,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
394 sbq->min_shallow_depth = UINT_MAX; 394 sbq->min_shallow_depth = UINT_MAX;
395 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); 395 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
396 atomic_set(&sbq->wake_index, 0); 396 atomic_set(&sbq->wake_index, 0);
397 atomic_set(&sbq->ws_active, 0);
397 398
398 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); 399 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
399 if (!sbq->ws) { 400 if (!sbq->ws) {
@@ -509,6 +510,9 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
509{ 510{
510 int i, wake_index; 511 int i, wake_index;
511 512
513 if (!atomic_read(&sbq->ws_active))
514 return NULL;
515
512 wake_index = atomic_read(&sbq->wake_index); 516 wake_index = atomic_read(&sbq->wake_index);
513 for (i = 0; i < SBQ_WAIT_QUEUES; i++) { 517 for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
514 struct sbq_wait_state *ws = &sbq->ws[wake_index]; 518 struct sbq_wait_state *ws = &sbq->ws[wake_index];
@@ -634,6 +638,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
634 638
635 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); 639 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
636 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); 640 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
641 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
637 642
638 seq_puts(m, "ws={\n"); 643 seq_puts(m, "ws={\n");
639 for (i = 0; i < SBQ_WAIT_QUEUES; i++) { 644 for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
@@ -649,3 +654,26 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
649 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); 654 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
650} 655}
651EXPORT_SYMBOL_GPL(sbitmap_queue_show); 656EXPORT_SYMBOL_GPL(sbitmap_queue_show);
657
/*
 * Wrapper around prepare_to_wait_exclusive() that additionally counts
 * this waiter in sbq->ws_active (once per sbq_wait, guarded by
 * @accounted), so the wakeup path can skip scanning the wait queues
 * entirely when nobody is waiting (see the ws_active check in
 * sbq_wake_ptr()).
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->accounted) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->accounted = 1;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
669
/*
 * Counterpart to sbitmap_prepare_to_wait(): removes the wait entry from
 * the wait queue and drops the ws_active reference taken when the entry
 * was first accounted, clearing @accounted so the entry can be reused.
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->accounted) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->accounted = 0;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);