author		Jens Axboe <axboe@fb.com>	2014-05-19 13:52:35 -0400
committer	Jens Axboe <axboe@fb.com>	2014-05-19 13:52:35 -0400
commit		39a9f97e5ea99e048c4980c23cf197f6e77995cb (patch)
tree		b1f72ed1e852372b6d86b79157b09f77fabc5a20 /block/blk-mq-tag.c
parent		1429d7c9467e1e3de0b0ff91d7e4d67c1a92f8a3 (diff)
parent		0d2602ca30e410e84e8bdf05c84ed5688e0a5a44 (diff)
Merge branch 'for-3.16/blk-mq-tagging' into for-3.16/core
Signed-off-by: Jens Axboe <axboe@fb.com>

Conflicts:
	block/blk-mq-tag.c
Diffstat (limited to 'block/blk-mq-tag.c')
-rw-r--r--	block/blk-mq-tag.c	112
1 file changed, 95 insertions(+), 17 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 03ce6a11ba79..e6b3fbae9862 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -7,13 +7,12 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
-void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx,
-		bool reserved)
+void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved)
 {
 	int tag, zero = 0;
 
-	tag = blk_mq_get_tag(tags, hctx, &zero, __GFP_WAIT, reserved);
-	blk_mq_put_tag(tags, tag, &zero);
+	tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved);
+	blk_mq_put_tag(hctx, tag, &zero);
 }
 
 static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
@@ -40,6 +39,84 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
 	return bt_has_free_tags(&tags->bitmap_tags);
 }
 
+static inline void bt_index_inc(unsigned int *index)
+{
+	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
+}
+
+/*
+ * If a previously inactive queue goes active, bump the active user count.
+ */
+bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+{
+	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		atomic_inc(&hctx->tags->active_queues);
+
+	return true;
+}
+
+/*
+ * If a previously busy queue goes inactive, potential waiters could now
+ * be allowed to queue. Wake them up and check.
+ */
+void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+{
+	struct blk_mq_tags *tags = hctx->tags;
+	struct blk_mq_bitmap_tags *bt;
+	int i, wake_index;
+
+	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		return;
+
+	atomic_dec(&tags->active_queues);
+
+	/*
+	 * Will only throttle depth on non-reserved tags
+	 */
+	bt = &tags->bitmap_tags;
+	wake_index = bt->wake_index;
+	for (i = 0; i < BT_WAIT_QUEUES; i++) {
+		struct bt_wait_state *bs = &bt->bs[wake_index];
+
+		if (waitqueue_active(&bs->wait))
+			wake_up(&bs->wait);
+
+		bt_index_inc(&wake_index);
+	}
+}
+
+/*
+ * For shared tag users, we track the number of currently active users
+ * and attempt to provide a fair share of the tag depth for each of them.
+ */
+static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+				  struct blk_mq_bitmap_tags *bt)
+{
+	unsigned int depth, users;
+
+	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
+		return true;
+	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		return true;
+
+	/*
+	 * Don't try dividing an ant
+	 */
+	if (bt->depth == 1)
+		return true;
+
+	users = atomic_read(&hctx->tags->active_queues);
+	if (!users)
+		return true;
+
+	/*
+	 * Allow at least some tags
+	 */
+	depth = max((bt->depth + users - 1) / users, 4U);
+	return atomic_read(&hctx->nr_active) < depth;
+}
+
 static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
 {
 	int tag, org_last_tag, end;
@@ -78,11 +155,15 @@ restart:
 	 * multiple users will tend to stick to different cachelines, at least
 	 * until the map is exhausted.
 	 */
-static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache)
+static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
+		    unsigned int *tag_cache)
 {
 	unsigned int last_tag, org_last_tag;
 	int index, i, tag;
 
+	if (!hctx_may_queue(hctx, bt))
+		return -1;
+
 	last_tag = org_last_tag = *tag_cache;
 	index = TAG_TO_INDEX(bt, last_tag);
 
@@ -117,11 +198,6 @@ done:
 	return tag;
 }
 
-static inline void bt_index_inc(unsigned int *index)
-{
-	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
-}
-
 static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
 					 struct blk_mq_hw_ctx *hctx)
 {
@@ -142,7 +218,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
 	DEFINE_WAIT(wait);
 	int tag;
 
-	tag = __bt_get(bt, last_tag);
+	tag = __bt_get(hctx, bt, last_tag);
 	if (tag != -1)
 		return tag;
 
@@ -156,7 +232,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
 		was_empty = list_empty(&wait.task_list);
 		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		tag = __bt_get(bt, last_tag);
+		tag = __bt_get(hctx, bt, last_tag);
 		if (tag != -1)
 			break;
 
@@ -200,14 +276,13 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
 	return tag;
 }
 
-unsigned int blk_mq_get_tag(struct blk_mq_tags *tags,
-		struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
+unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
 		gfp_t gfp, bool reserved)
 {
 	if (!reserved)
-		return __blk_mq_get_tag(tags, hctx, last_tag, gfp);
+		return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp);
 
-	return __blk_mq_get_reserved_tag(tags, gfp);
+	return __blk_mq_get_reserved_tag(hctx->tags, gfp);
 }
 
 static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
@@ -265,9 +340,11 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
 	bt_clear_tag(&tags->breserved_tags, tag);
 }
 
-void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag,
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
 		    unsigned int *last_tag)
 {
+	struct blk_mq_tags *tags = hctx->tags;
+
 	if (tag >= tags->nr_reserved_tags) {
 		const int real_tag = tag - tags->nr_reserved_tags;
 
@@ -465,6 +542,7 @@ ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
 	res = bt_unused_tags(&tags->breserved_tags);
 
 	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
+	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
 
 	return page - orig_page;
 }
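
The accounting this merge introduces hinges on __blk_mq_tag_busy()/__blk_mq_tag_idle(): tags->active_queues is bumped only on the inactive-to-active transition, and the BLK_MQ_S_TAG_ACTIVE bit is checked with a plain test_bit() before falling back to test_and_set_bit(), so the common already-active case never pays for an atomic read-modify-write on a shared cacheline. Below is a minimal userspace sketch of that pattern, assuming made-up types (shared_tags, hw_queue) and C11 atomics in place of the kernel's bitops; it is illustrative, not the kernel implementation.

/*
 * Userspace model of the "check before test_and_set" pattern used by
 * __blk_mq_tag_busy()/__blk_mq_tag_idle(). Names are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct shared_tags { atomic_int active_queues; };
struct hw_queue    { atomic_bool tag_active; struct shared_tags *tags; };

/* Mark a queue active exactly once: the plain load skips the atomic
 * exchange (and its cacheline bounce) when the flag is already set. */
static bool queue_mark_busy(struct hw_queue *hctx)
{
	if (!atomic_load(&hctx->tag_active) &&
	    !atomic_exchange(&hctx->tag_active, true))
		atomic_fetch_add(&hctx->tags->active_queues, 1);
	return true;
}

/* Only the active -> idle transition drops the shared user count. */
static void queue_mark_idle(struct hw_queue *hctx)
{
	if (atomic_exchange(&hctx->tag_active, false))
		atomic_fetch_sub(&hctx->tags->active_queues, 1);
}

int main(void)
{
	struct shared_tags tags = { .active_queues = 0 };
	struct hw_queue q = { .tag_active = false, .tags = &tags };

	queue_mark_busy(&q);
	queue_mark_busy(&q);	/* second call is a no-op */
	printf("active_queues=%d\n", atomic_load(&tags.active_queues)); /* 1 */

	queue_mark_idle(&q);
	printf("active_queues=%d\n", atomic_load(&tags.active_queues)); /* 0 */
	return 0;
}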
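
The consumer of that count is hctx_may_queue(): each active user of a shared tag set is allowed roughly ceil(depth / users) tags, with a floor of four so every queue can still make progress, and a queue may only take another tag while its nr_active stays below that share. A minimal userspace sketch of the arithmetic follows; fair_share_depth(), may_queue() and the sample numbers (128 tags, 3 users) are illustrative assumptions, not part of the patch.

/*
 * Standalone sketch of the fair-share check in hctx_may_queue().
 * tag_depth, active_users and in_flight are made-up inputs.
 */
#include <stdbool.h>
#include <stdio.h>

/* Per-queue allowance: ceil(tag_depth / active_users), but at least 4. */
static unsigned int fair_share_depth(unsigned int tag_depth,
				     unsigned int active_users)
{
	unsigned int depth;

	if (active_users == 0 || tag_depth == 1)
		return tag_depth;	/* nothing worth dividing */

	depth = (tag_depth + active_users - 1) / active_users;
	return depth < 4 ? 4 : depth;
}

/* Mirror of the final test in hctx_may_queue(): may this queue take a tag? */
static bool may_queue(unsigned int tag_depth, unsigned int active_users,
		      unsigned int in_flight)
{
	return in_flight < fair_share_depth(tag_depth, active_users);
}

int main(void)
{
	/* 128 shared tags split across 3 active queues -> 43 tags each. */
	printf("share = %u\n", fair_share_depth(128, 3));
	printf("42 in flight, may queue: %d\n", may_queue(128, 3, 42)); /* 1 */
	printf("43 in flight, may queue: %d\n", may_queue(128, 3, 43)); /* 0 */
	return 0;
}

With these two pieces in place, __bt_get() simply refuses a tag (returns -1) whenever the caller is over its share, which is why it now takes the hctx and why blk_mq_get_tag()/blk_mq_put_tag() were reworked to pass the hardware context instead of a bare blk_mq_tags pointer.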