path: root/block/blk-mq-sysfs.c
author		Jens Axboe <axboe@kernel.dk>	2013-10-24 04:20:05 -0400
committer	Jens Axboe <axboe@kernel.dk>	2013-10-25 06:56:00 -0400
commit		320ae51feed5c2f13664aa05a76bec198967e04d (patch)
tree		ad37ccbcc5ddb1c9c19e48965bf8fec1b05217dc /block/blk-mq-sysfs.c
parent		1dddc01af0d42b21058e0cb9c1ca9e8d5204d9b0 (diff)
blk-mq: new multi-queue block IO queueing mechanism
Linux currently has two models for block devices:

- The classic request_fn based approach, where drivers use struct
  request units for IO. The block layer provides various helper
  functionalities to let drivers share code, things like tag
  management, timeout handling, queueing, etc.

- The "stacked" approach, where a driver squeezes in between the
  block layer and IO submitter. Since this bypasses the IO stack,
  drivers generally have to manage everything themselves.

With drivers being written for new high IOPS devices, the classic
request_fn based driver doesn't work well enough. The design dates
back to when both SMP and high IOPS were rare. It has problems with
scaling to bigger machines, and runs into scaling issues even on
smaller machines when you have IOPS in the hundreds of thousands per
device.

The stacked approach is then most often selected as the model for the
driver. But this means that everybody has to re-invent everything, and
along with that we get all the problems again that the shared approach
solved.

This commit introduces blk-mq, block multi queue support. The design
is centered around per-cpu queues for queueing IO, which then funnel
down into x number of hardware submission queues. We might have a 1:1
mapping between the two, or it might be an N:M mapping. That all
depends on what the hardware supports.

blk-mq provides various helper functions, which include:

- Scalable support for request tagging. Most devices need to be able
  to uniquely identify a request both in the driver and to the
  hardware. The tagging uses per-cpu caches for freed tags, to enable
  cache hot reuse.

- Timeout handling without tracking requests on a per-device basis.
  Basically the driver should be able to get a notification if a
  request happens to fail.

- Optional support for non 1:1 mappings between issue and submission
  queues. blk-mq can redirect IO completions to the desired location.

- Support for per-request payloads. Drivers almost always need to
  associate a request structure with some driver private command
  structure. Drivers can tell blk-mq this at init time, and then any
  request handed to the driver will have the required size of memory
  associated with it.

- Support for merging of IO, and plugging. The stacked model gets
  neither of these. Even for high IOPS devices, merging sequential IO
  reduces per-command overhead and thus increases bandwidth.

For now, this is provided as a potential 3rd queueing model, with the
hope being that, as it matures, it can replace both the classic and
stacked model. That would get us back to having just 1 real model for
block devices, leaving the stacked approach to dm/md devices (as it
was originally intended).

Contributions in this patch from the following people:

Shaohua Li <shli@fusionio.com>
Alexander Gordeev <agordeev@redhat.com>
Christoph Hellwig <hch@infradead.org>
Mike Christie <michaelc@cs.wisc.edu>
Matias Bjorling <m@bjorling.me>
Jeff Moyer <jmoyer@redhat.com>

Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
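As a concrete illustration of the driver-facing side described above, the sketch below registers a single-hw-queue device against the interface introduced by this series (struct blk_mq_reg, blk_mq_init_queue(), blk_mq_end_io()). All example_* names are hypothetical, and this registration interface was reworked in later kernels (blk_mq_reg eventually gave way to blk_mq_tag_set), so treat this as a sketch of the model rather than a stable API:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Per-request driver payload: blk-mq allocates cmd_size extra bytes
 * with every request up front, so the driver never has to allocate a
 * command structure in the IO path. */
struct example_cmd {
	int status;
};

static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	/* A real driver would kick the hardware here and complete the
	 * request later, e.g. from its interrupt handler. */
	blk_mq_end_io(rq, 0);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,		/* default ctx->hctx mapping */
	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
	.free_hctx	= blk_mq_free_single_hw_queue,
};

static struct blk_mq_reg example_mq_reg = {
	.ops		= &example_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.cmd_size	= sizeof(struct example_cmd),
	.flags		= BLK_MQ_F_SHOULD_MERGE,	/* opt in to IO merging */
};

/* At probe time a driver would then do something like:
 *
 *	q = blk_mq_init_queue(&example_mq_reg, driver_data);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 * and the sysfs files added by the file below appear once the disk is
 * registered.
 */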
Diffstat (limited to 'block/blk-mq-sysfs.c')
-rw-r--r--	block/blk-mq-sysfs.c	384
1 file changed, 384 insertions, 0 deletions
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
new file mode 100644
index 000000000000..ba6cf8e9aa0a
--- /dev/null
+++ b/block/blk-mq-sysfs.c
@@ -0,0 +1,384 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

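/*
 * Empty release: the ctx and hctx kobjects are embedded in structures
 * whose lifetime is managed by the request queue itself, so there is
 * nothing to free here when the last reference is dropped.
 */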
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

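/*
 * Generic show/store entry points: recover the ctx (or hctx) and the
 * typed attribute from the kobject/attribute pair via container_of(),
 * then serialize against queue teardown with q->sysfs_lock and refuse
 * access once the queue has been marked dying.
 */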
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

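/*
 * Per-software-queue (ctx) counters. The dispatched and completed
 * files print two values each; going by how the counters are bumped
 * in blk-mq.c, index 1 should count sync requests and index 0 async.
 */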
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
		       ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
		       ctx->rq_completed[0]);
}

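/* Dump the addresses of all requests pending on a list (debugging aid). */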
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	char *start_page = page;
	struct request *rq;

	page += sprintf(page, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist)
		page += sprintf(page, "\t%p\n", rq);

	return page - start_page;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}

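/*
 * Power-of-two histogram of how many requests were on the dispatch
 * list each time this hardware queue was run. Each row prints the
 * bucket's lower bound followed by its count; row 0 counts runs that
 * dispatched nothing.
 */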
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

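/*
 * ipi_redirect: when set, a completion is redirected (via IPI) back
 * to the CPU that submitted the request instead of being handled on
 * the completing CPU. The store propagates the flag into every ctx
 * served by this hardware queue.
 */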
static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
					 const char *page, size_t len)
{
	struct blk_mq_ctx *ctx;
	unsigned long ret;
	unsigned int i;

	if (kstrtoul(page, 10, &ret)) {
		pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
		return -EINVAL;
	}

	spin_lock(&hctx->lock);
	if (ret)
		hctx->flags |= BLK_MQ_F_SHOULD_IPI;
	else
		hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
	spin_unlock(&hctx->lock);

	hctx_for_each_ctx(hctx, ctx, i)
		ctx->ipi_redirect = !!ret;

	return len;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

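/*
 * Attribute tables: the per-ctx files appear under each cpu<N>
 * directory, the per-hctx files under each hardware queue directory.
 * Everything is read-only except ipi_redirect.
 */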
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
	.attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
	.show = blk_mq_hw_sysfs_ipi_show,
	.store = blk_mq_hw_sysfs_ipi_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_ipi.attr,
	&blk_mq_hw_sysfs_tags.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

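/*
 * Register/unregister the "mq" hierarchy for a disk, giving a layout
 * of /sys/block/<disk>/mq/<hw queue index>/cpu<N>/, created when the
 * disk's queue is registered.
 */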
void blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);
}

int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int ret, i, j;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
		ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
		if (ret)
			break;

		if (!hctx->nr_ctx)
			continue;

		hctx_for_each_ctx(hctx, ctx, j) {
			kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
			ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
			if (ret)
				break;
		}
	}

	if (ret) {
		blk_mq_unregister_disk(disk);
		return ret;
	}

	return 0;
}