aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorJens Axboe <axboe@fb.com>2014-05-28 12:18:51 -0400
committerJens Axboe <axboe@fb.com>2014-05-28 12:18:51 -0400
commit0fb662e2250afdf2c54253dbf2063dc9d8369b69 (patch)
tree6a1014ab27531dae2702894e9e3cb6cdad9f0d60 /block
parent6178976500ae61fa7b12ebb2d3de816dc9396388 (diff)
parentcdef54dd85ad66e77262ea57796a3e81683dd5d6 (diff)
Merge branch 'for-3.16/core' into for-3.16/drivers
Pull in core changes (again), since we got rid of the alloc/free hctx mq_ops hooks and mtip32xx then needed updating again. Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-mq-cpu.c5
-rw-r--r--block/blk-mq-cpumap.c5
-rw-r--r--block/blk-mq-tag.c12
-rw-r--r--block/blk-mq.c32
4 files changed, 33 insertions, 21 deletions
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index d2c253f71b86..bb3ed488f7b5 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -1,3 +1,8 @@
1/*
2 * CPU notifier helper code for blk-mq
3 *
4 * Copyright (C) 2013-2014 Jens Axboe
5 */
1#include <linux/kernel.h> 6#include <linux/kernel.h>
2#include <linux/module.h> 7#include <linux/module.h>
3#include <linux/init.h> 8#include <linux/init.h>
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 0daacb927be1..1065d7c65fa1 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -1,3 +1,8 @@
1/*
2 * CPU <-> hardware queue mapping helpers
3 *
4 * Copyright (C) 2013-2014 Jens Axboe
5 */
1#include <linux/kernel.h> 6#include <linux/kernel.h>
2#include <linux/threads.h> 7#include <linux/threads.h>
3#include <linux/module.h> 8#include <linux/module.h>
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 0d0640d38a06..d90c4aeb7dd3 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -1,3 +1,15 @@
1/*
2 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
3 * over multiple cachelines to avoid ping-pong between multiple submitters
4 * or submitter and completer. Uses rolling wakeups to avoid falling off
5 * the scaling cliff when we run out of tags and have to start putting
6 * submitters to sleep.
7 *
8 * Uses active queue tracking to support fairer distribution of tags
9 * between multiple submitters when a shared tag map is used.
10 *
11 * Copyright (C) 2013-2014 Jens Axboe
12 */
1#include <linux/kernel.h> 13#include <linux/kernel.h>
2#include <linux/module.h> 14#include <linux/module.h>
3#include <linux/random.h> 15#include <linux/random.h>
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae14749b530c..f27fe44230c2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1,3 +1,9 @@
1/*
2 * Block multiqueue core code
3 *
4 * Copyright (C) 2013-2014 Jens Axboe
5 * Copyright (C) 2013-2014 Christoph Hellwig
6 */
1#include <linux/kernel.h> 7#include <linux/kernel.h>
2#include <linux/module.h> 8#include <linux/module.h>
3#include <linux/backing-dev.h> 9#include <linux/backing-dev.h>
@@ -1329,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1329} 1335}
1330EXPORT_SYMBOL(blk_mq_map_queue); 1336EXPORT_SYMBOL(blk_mq_map_queue);
1331 1337
1332struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
1333 unsigned int hctx_index,
1334 int node)
1335{
1336 return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
1337}
1338EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
1339
1340void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
1341 unsigned int hctx_index)
1342{
1343 kfree(hctx);
1344}
1345EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
1346
1347static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, 1338static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1348 struct blk_mq_tags *tags, unsigned int hctx_idx) 1339 struct blk_mq_tags *tags, unsigned int hctx_idx)
1349{ 1340{
@@ -1584,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
1584 1575
1585 queue_for_each_hw_ctx(q, hctx, i) { 1576 queue_for_each_hw_ctx(q, hctx, i) {
1586 free_cpumask_var(hctx->cpumask); 1577 free_cpumask_var(hctx->cpumask);
1587 set->ops->free_hctx(hctx, i); 1578 kfree(hctx);
1588 } 1579 }
1589} 1580}
1590 1581
@@ -1805,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1805 for (i = 0; i < set->nr_hw_queues; i++) { 1796 for (i = 0; i < set->nr_hw_queues; i++) {
1806 int node = blk_mq_hw_queue_to_node(map, i); 1797 int node = blk_mq_hw_queue_to_node(map, i);
1807 1798
1808 hctxs[i] = set->ops->alloc_hctx(set, i, node); 1799 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1800 GFP_KERNEL, node);
1809 if (!hctxs[i]) 1801 if (!hctxs[i])
1810 goto err_hctxs; 1802 goto err_hctxs;
1811 1803
@@ -1892,7 +1884,7 @@ err_hctxs:
1892 if (!hctxs[i]) 1884 if (!hctxs[i])
1893 break; 1885 break;
1894 free_cpumask_var(hctxs[i]->cpumask); 1886 free_cpumask_var(hctxs[i]->cpumask);
1895 set->ops->free_hctx(hctxs[i], i); 1887 kfree(hctxs[i]);
1896 } 1888 }
1897err_map: 1889err_map:
1898 kfree(hctxs); 1890 kfree(hctxs);
@@ -1977,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1977 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) 1969 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1978 return -EINVAL; 1970 return -EINVAL;
1979 1971
1980 if (!set->nr_hw_queues || 1972 if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
1981 !set->ops->queue_rq || !set->ops->map_queue ||
1982 !set->ops->alloc_hctx || !set->ops->free_hctx)
1983 return -EINVAL; 1973 return -EINVAL;
1984 1974
1985 1975