author    Christoph Hellwig <hch@lst.de>  2014-05-28 12:11:06 -0400
committer Jens Axboe <axboe@fb.com>       2014-05-28 12:18:31 -0400
commit    cdef54dd85ad66e77262ea57796a3e81683dd5d6 (patch)
tree      55ae5b4f46a884b15879eaab19c7af72db23927b
parent    75bb4625bb78d6a2d879dcb6a7d482861295765b (diff)
blk-mq: remove alloc_hctx and free_hctx methods
There is no need for drivers to control hardware context allocation
now that we do the context to node mapping in common code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
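For illustration, here is a condensed sketch (not part of the patch itself) of the common-code path that makes the per-driver hooks redundant; it follows the blk_mq_init_queue() hunk below, where i, set, map and hctxs are locals of that function:

	/*
	 * Condensed from the blk_mq_init_queue() hunk below: the core maps
	 * each hardware queue to a NUMA node and allocates the hctx itself,
	 * so drivers no longer need to supply alloc_hctx/free_hctx methods.
	 */
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node = blk_mq_hw_queue_to_node(map, i);

		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
					GFP_KERNEL, node);
		if (!hctxs[i])
			goto err_hctxs;
	}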
-rw-r--r--  block/blk-mq.c             | 26
-rw-r--r--  drivers/block/null_blk.c   | 28
-rw-r--r--  drivers/block/virtio_blk.c |  2
-rw-r--r--  include/linux/blk-mq.h     | 10
4 files changed, 6 insertions, 60 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5cc4b871cb11..f27fe44230c2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1335,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
 }
 EXPORT_SYMBOL(blk_mq_map_queue);
 
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
-		unsigned int hctx_index,
-		int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
-
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
-		unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
-
 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 		struct blk_mq_tags *tags, unsigned int hctx_idx)
 {
@@ -1590,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		free_cpumask_var(hctx->cpumask);
-		set->ops->free_hctx(hctx, i);
+		kfree(hctx);
 	}
 }
 
@@ -1811,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int node = blk_mq_hw_queue_to_node(map, i);
 
-		hctxs[i] = set->ops->alloc_hctx(set, i, node);
+		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+					GFP_KERNEL, node);
 		if (!hctxs[i])
 			goto err_hctxs;
 
@@ -1898,7 +1884,7 @@ err_hctxs:
 		if (!hctxs[i])
 			break;
 		free_cpumask_var(hctxs[i]->cpumask);
-		set->ops->free_hctx(hctxs[i], i);
+		kfree(hctxs[i]);
 	}
 err_map:
 	kfree(hctxs);
@@ -1983,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues ||
-	    !set->ops->queue_rq || !set->ops->map_queue ||
-	    !set->ops->alloc_hctx || !set->ops->free_hctx)
+	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
 
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 4d33c8c25fbf..b40af63a5476 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -321,18 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
-		unsigned int hctx_index,
-		int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-
-static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
 {
 	BUG_ON(!nullb);
@@ -360,17 +348,6 @@ static struct blk_mq_ops null_mq_ops = {
 	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
-};
-
-static struct blk_mq_ops null_mq_ops_pernode = {
-	.queue_rq	= null_queue_rq,
-	.map_queue	= blk_mq_map_queue,
-	.init_hctx	= null_init_hctx,
-	.complete	= null_softirq_done_fn,
-	.alloc_hctx	= null_alloc_hctx,
-	.free_hctx	= null_free_hctx,
 };
 
 static void null_del_dev(struct nullb *nullb)
@@ -496,10 +473,7 @@ static int null_add_dev(void)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
-		if (use_per_node_hctx)
-			nullb->tag_set.ops = &null_mq_ops_pernode;
-		else
-			nullb->tag_set.ops = &null_mq_ops;
+		nullb->tag_set.ops = &null_mq_ops;
 		nullb->tag_set.nr_hw_queues = submit_queues;
 		nullb->tag_set.queue_depth = hw_queue_depth;
 		nullb->tag_set.numa_node = home_node;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7a51f065edcd..16c21c0cb14d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -497,8 +497,6 @@ static int virtblk_init_request(void *data, struct request *rq,
 static struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.map_queue	= blk_mq_map_queue,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
 	.complete	= virtblk_request_done,
 	.init_request	= virtblk_init_request,
 };
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2bd82f399128..91dfb75ce39f 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -79,9 +79,6 @@ struct blk_mq_tag_set {
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
-typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
-		unsigned int, int);
-typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ -108,12 +105,6 @@ struct blk_mq_ops {
 	softirq_done_fn		*complete;
 
 	/*
-	 * Override for hctx allocations (should probably go)
-	 */
-	alloc_hctx_fn		*alloc_hctx;
-	free_hctx_fn		*free_hctx;
-
-	/*
 	 * Called when the block layer side of a hardware queue has been
 	 * set up, allowing the driver to allocate/init matching structures.
 	 * Ditto for exit/teardown.
@@ -166,7 +157,6 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
 void blk_mq_end_io(struct request *rq, int error);
 void __blk_mq_end_io(struct request *rq, int error);
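
With the hooks gone, a driver's blk_mq_ops reduces to the queueing callbacks; per the blk_mq_alloc_tag_set() hunk above, only nr_hw_queues, queue_rq and map_queue are now validated as mandatory. A minimal sketch, using hypothetical example_* handlers modeled on the null_mq_ops above:

	/*
	 * Hypothetical driver ops after this patch: no alloc_hctx/free_hctx.
	 * example_queue_rq and example_complete stand in for real handlers.
	 */
	static struct blk_mq_ops example_mq_ops = {
		.queue_rq	= example_queue_rq,	/* required: submit a request */
		.map_queue	= blk_mq_map_queue,	/* required: common cpu-to-hctx mapper */
		.complete	= example_complete,	/* optional: softirq completion */
	};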