author    Jens Axboe <axboe@fb.com>    2014-05-28 12:18:51 -0400
committer Jens Axboe <axboe@fb.com>    2014-05-28 12:18:51 -0400
commit    0fb662e2250afdf2c54253dbf2063dc9d8369b69 (patch)
tree      6a1014ab27531dae2702894e9e3cb6cdad9f0d60
parent    6178976500ae61fa7b12ebb2d3de816dc9396388 (diff)
parent    cdef54dd85ad66e77262ea57796a3e81683dd5d6 (diff)
Merge branch 'for-3.16/core' into for-3.16/drivers
Pull in core changes (again), since we got rid of the alloc/free hctx
mq_ops hooks and mtip32xx then needed updating again.

Signed-off-by: Jens Axboe <axboe@fb.com>
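The practical effect of the merged core change: a blk-mq driver no longer supplies hctx allocation hooks, since the core allocates each hardware context itself and blk_mq_alloc_tag_set() only insists on queue_rq and map_queue. As a rough sketch (the mydrv_* names are hypothetical; the hook names are taken from the ops tables updated in the diffs below), a driver-side ops table now looks like this:

static struct blk_mq_ops mydrv_mq_ops = {
        .queue_rq       = mydrv_queue_rq,       /* required: submit one request */
        .map_queue      = blk_mq_map_queue,     /* required: default cpu->hctx mapping */
        .init_request   = mydrv_init_request,   /* optional per-request setup */
        .complete       = mydrv_request_done,   /* optional completion hook */
};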
-rw-r--r--  block/blk-mq-cpu.c                   5
-rw-r--r--  block/blk-mq-cpumap.c                5
-rw-r--r--  block/blk-mq-tag.c                  12
-rw-r--r--  block/blk-mq.c                      32
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c    2
-rw-r--r--  drivers/block/null_blk.c            28
-rw-r--r--  drivers/block/virtio_blk.c           2
-rw-r--r--  include/linux/blk-mq.h              10
8 files changed, 34 insertions(+), 62 deletions(-)
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index d2c253f71b86..bb3ed488f7b5 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -1,3 +1,8 @@
+/*
+ * CPU notifier helper code for blk-mq
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 0daacb927be1..1065d7c65fa1 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -1,3 +1,8 @@
+/*
+ * CPU <-> hardware queue mapping helpers
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
 #include <linux/kernel.h>
 #include <linux/threads.h>
 #include <linux/module.h>
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 0d0640d38a06..d90c4aeb7dd3 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -1,3 +1,15 @@
+/*
+ * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
+ * over multiple cachelines to avoid ping-pong between multiple submitters
+ * or submitter and completer. Uses rolling wakeups to avoid falling off
+ * the scaling cliff when we run out of tags and have to start putting
+ * submitters to sleep.
+ *
+ * Uses active queue tracking to support fairer distribution of tags
+ * between multiple submitters when a shared tag map is used.
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/random.h>
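The comment block added above compresses a lot of design. The real kernel code uses atomic bit operations and per-queue wait queues with rolling wakeups; the stripped-down, single-threaded user-space sketch below (all names hypothetical, NOT the kernel implementation) only illustrates the core layout trick: padding each small bitmap word to its own cacheline and spreading allocations across words via a per-caller hint, so concurrent submitters tend to touch different cachelines.

#include <stdio.h>

#define CACHELINE       64
#define NR_MAPS         4
#define BITS_PER_MAP    16      /* 4 * 16 = 64 tags total */

struct tag_map {
        unsigned long word;     /* one bit per in-use tag */
        char pad[CACHELINE - sizeof(unsigned long)];    /* own cacheline */
};

static struct tag_map maps[NR_MAPS];

/*
 * Find a clear bit, starting from a per-caller hint so that different
 * submitters spread across the maps instead of all hammering map 0.
 */
static int get_tag(unsigned int hint)
{
        for (unsigned int i = 0; i < NR_MAPS; i++) {
                unsigned int m = (hint + i) % NR_MAPS;

                for (unsigned int b = 0; b < BITS_PER_MAP; b++) {
                        if (!(maps[m].word & (1UL << b))) {
                                maps[m].word |= 1UL << b;
                                return m * BITS_PER_MAP + b;
                        }
                }
        }
        return -1;      /* out of tags; a real submitter would sleep here */
}

static void put_tag(int tag)
{
        maps[tag / BITS_PER_MAP].word &= ~(1UL << (tag % BITS_PER_MAP));
}

int main(void)
{
        int t0 = get_tag(0), t1 = get_tag(1);   /* land in different maps */

        printf("tags: %d %d\n", t0, t1);
        put_tag(t0);
        put_tag(t1);
        return 0;
}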
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae14749b530c..f27fe44230c2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1,3 +1,9 @@
+/*
+ * Block multiqueue core code
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ * Copyright (C) 2013-2014 Christoph Hellwig
+ */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
@@ -1329,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
 }
 EXPORT_SYMBOL(blk_mq_map_queue);
 
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
-                                                   unsigned int hctx_index,
-                                                   int node)
-{
-        return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
-
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
-                                 unsigned int hctx_index)
-{
-        kfree(hctx);
-}
-EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
-
 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                 struct blk_mq_tags *tags, unsigned int hctx_idx)
 {
@@ -1584,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 
         queue_for_each_hw_ctx(q, hctx, i) {
                 free_cpumask_var(hctx->cpumask);
-                set->ops->free_hctx(hctx, i);
+                kfree(hctx);
         }
 }
 
@@ -1805,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
         for (i = 0; i < set->nr_hw_queues; i++) {
                 int node = blk_mq_hw_queue_to_node(map, i);
 
-                hctxs[i] = set->ops->alloc_hctx(set, i, node);
+                hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+                                        GFP_KERNEL, node);
                 if (!hctxs[i])
                         goto err_hctxs;
 
@@ -1892,7 +1884,7 @@ err_hctxs:
                 if (!hctxs[i])
                         break;
                 free_cpumask_var(hctxs[i]->cpumask);
-                set->ops->free_hctx(hctxs[i], i);
+                kfree(hctxs[i]);
         }
 err_map:
         kfree(hctxs);
@@ -1977,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
                 return -EINVAL;
 
-        if (!set->nr_hw_queues ||
-            !set->ops->queue_rq || !set->ops->map_queue ||
-            !set->ops->alloc_hctx || !set->ops->free_hctx)
+        if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
                 return -EINVAL;
 
 
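With the slimmed-down validation above, queue_rq and map_queue are the only mandatory ops; a driver just fills in its tag set and lets the core do the rest. A hedged sketch of the call site (field values are illustrative; the field names match the null_blk hunk further down, and mydrv_mq_ops is the hypothetical ops table from the sketch near the top):

        struct blk_mq_tag_set set = {
                .ops            = &mydrv_mq_ops,
                .nr_hw_queues   = 4,
                .queue_depth    = 64,
                .numa_node      = NUMA_NO_NODE,
        };
        int ret = blk_mq_alloc_tag_set(&set);

        if (ret)
                return ret;     /* -EINVAL if a required op or a sane depth is missing */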
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index ea323e91903b..74abd49fabdc 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3832,8 +3832,6 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
 static struct blk_mq_ops mtip_mq_ops = {
         .queue_rq       = mtip_queue_rq,
         .map_queue      = blk_mq_map_queue,
-        .alloc_hctx     = blk_mq_alloc_single_hw_queue,
-        .free_hctx      = blk_mq_free_single_hw_queue,
         .init_request   = mtip_init_cmd,
         .exit_request   = mtip_free_cmd,
 };
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 5a8081114df6..77087a29b127 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -321,18 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
         return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
-                                             unsigned int hctx_index,
-                                             int node)
-{
-        return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-
-static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
-{
-        kfree(hctx);
-}
-
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
 {
         BUG_ON(!nullb);
@@ -360,17 +348,6 @@ static struct blk_mq_ops null_mq_ops = {
         .map_queue      = blk_mq_map_queue,
         .init_hctx      = null_init_hctx,
         .complete       = null_softirq_done_fn,
-        .alloc_hctx     = blk_mq_alloc_single_hw_queue,
-        .free_hctx      = blk_mq_free_single_hw_queue,
-};
-
-static struct blk_mq_ops null_mq_ops_pernode = {
-        .queue_rq       = null_queue_rq,
-        .map_queue      = blk_mq_map_queue,
-        .init_hctx      = null_init_hctx,
-        .complete       = null_softirq_done_fn,
-        .alloc_hctx     = null_alloc_hctx,
-        .free_hctx      = null_free_hctx,
 };
 
 static void null_del_dev(struct nullb *nullb)
@@ -496,10 +473,7 @@ static int null_add_dev(void)
                 goto out_free_nullb;
 
         if (queue_mode == NULL_Q_MQ) {
-                if (use_per_node_hctx)
-                        nullb->tag_set.ops = &null_mq_ops_pernode;
-                else
-                        nullb->tag_set.ops = &null_mq_ops;
+                nullb->tag_set.ops = &null_mq_ops;
                 nullb->tag_set.nr_hw_queues = submit_queues;
                 nullb->tag_set.queue_depth = hw_queue_depth;
                 nullb->tag_set.numa_node = home_node;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 9f340fafca5c..c8f286e8d80f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -497,8 +497,6 @@ static int virtblk_init_request(void *data, struct request *rq,
 static struct blk_mq_ops virtio_mq_ops = {
         .queue_rq       = virtio_queue_rq,
         .map_queue      = blk_mq_map_queue,
-        .alloc_hctx     = blk_mq_alloc_single_hw_queue,
-        .free_hctx      = blk_mq_free_single_hw_queue,
         .complete       = virtblk_request_done,
         .init_request   = virtblk_init_request,
 };
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2bd82f399128..91dfb75ce39f 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -79,9 +79,6 @@ struct blk_mq_tag_set {
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
-typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
-                unsigned int, int);
-typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ -108,12 +105,6 @@ struct blk_mq_ops {
         softirq_done_fn         *complete;
 
         /*
-         * Override for hctx allocations (should probably go)
-         */
-        alloc_hctx_fn           *alloc_hctx;
-        free_hctx_fn            *free_hctx;
-
-        /*
          * Called when the block layer side of a hardware queue has been
          * set up, allowing the driver to allocate/init matching structures.
          * Ditto for exit/teardown.
@@ -166,7 +157,6 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
 void blk_mq_end_io(struct request *rq, int error);
 void __blk_mq_end_io(struct request *rq, int error);