author		Christoph Hellwig <hch@lst.de>	2014-05-28 12:11:06 -0400
committer	Jens Axboe <axboe@fb.com>	2014-05-28 12:18:31 -0400
commit		cdef54dd85ad66e77262ea57796a3e81683dd5d6 (patch)
tree		55ae5b4f46a884b15879eaab19c7af72db23927b /block
parent		75bb4625bb78d6a2d879dcb6a7d482861295765b (diff)
blk-mq: remove alloc_hctx and free_hctx methods
There is no need for drivers to control hardware context allocation
now that we do the context-to-node mapping in common code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
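
With ->alloc_hctx and ->free_hctx gone from blk_mq_ops, a driver only has
to supply ->queue_rq and ->map_queue; the tightened check in
blk_mq_alloc_tag_set() below enforces exactly that. As a minimal sketch of
the driver-side picture after this change (the example_* names and field
values are hypothetical; most drivers simply point ->map_queue at the
exported blk_mq_map_queue()):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical dispatch handler; a real driver would talk to hardware. */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	blk_mq_end_io(rq, 0);		/* complete immediately for the sketch */
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* common cpu-to-hctx mapping */
	/* no .alloc_hctx / .free_hctx: the core kzalloc_node()s each hctx */
};

static struct blk_mq_tag_set example_tag_set = {
	.ops		= &example_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};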
Diffstat (limited to 'block')
 block/blk-mq.c | 26 +++++---------------------
 1 file changed, 5 insertions(+), 21 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5cc4b871cb11..f27fe44230c2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1335,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
 }
 EXPORT_SYMBOL(blk_mq_map_queue);
 
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
-		unsigned int hctx_index,
-		int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
-
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
-		unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
-
 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 		struct blk_mq_tags *tags, unsigned int hctx_idx)
 {
@@ -1590,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		free_cpumask_var(hctx->cpumask);
-		set->ops->free_hctx(hctx, i);
+		kfree(hctx);
 	}
 }
 
@@ -1811,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int node = blk_mq_hw_queue_to_node(map, i);
 
-		hctxs[i] = set->ops->alloc_hctx(set, i, node);
+		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+					GFP_KERNEL, node);
 		if (!hctxs[i])
 			goto err_hctxs;
 
@@ -1898,7 +1884,7 @@ err_hctxs:
 		if (!hctxs[i])
 			break;
 		free_cpumask_var(hctxs[i]->cpumask);
-		set->ops->free_hctx(hctxs[i], i);
+		kfree(hctxs[i]);
 	}
 err_map:
 	kfree(hctxs);
@@ -1983,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues ||
-	    !set->ops->queue_rq || !set->ops->map_queue ||
-	    !set->ops->alloc_hctx || !set->ops->free_hctx)
+	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
 