path: root/include/linux/blk-mq.h
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-10-09 20:29:33 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-10-09 20:29:33 -0400
commit    12e3d3cdd975fe986cc5c35f60b1467a8ec20b80 (patch)
tree      14ec935d2e15f454ba69353fcf5329ac67f72e4f /include/linux/blk-mq.h
parent    48915c2cbc77eceec2005afb695ac658fede4e0d (diff)
parent    8ec2ef2b66ea2fd00acc28aca8edaad441dbb424 (diff)
Merge branch 'for-4.9/block-irq' of git://git.kernel.dk/linux-block
Pull blk-mq irq/cpu mapping updates from Jens Axboe:
 "This is the block-irq topic branch for 4.9-rc. It's mostly from
  Christoph, and it allows drivers to specify their own mappings, and
  more importantly, to share the blk-mq mappings with the IRQ affinity
  mappings. It's a good step towards making this work better out of
  the box"

* 'for-4.9/block-irq' of git://git.kernel.dk/linux-block:
  blk_mq: linux/blk-mq.h does not include all the headers it depends on
  blk-mq: kill unused blk_mq_create_mq_map()
  blk-mq: get rid of the cpumask in struct blk_mq_tags
  nvme: remove the post_scan callout
  nvme: switch to use pci_alloc_irq_vectors
  blk-mq: provide a default queue mapping for PCI device
  blk-mq: allow the driver to pass in a queue mapping
  blk-mq: remove ->map_queue
  blk-mq: only allocate a single mq_map per tag_set
  blk-mq: don't redistribute hardware queues on a CPU hotplug event
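For illustration only (not part of the commit): a minimal sketch of how a driver might use the interface this pull introduces. The my_* names are hypothetical; the mq_map array and the map_queues_fn signature match the hunks below. The new .map_queues op replaces the removed ->map_queue callback and fills the per-CPU queue map held in the tag set.

	static int my_map_queues(struct blk_mq_tag_set *set)
	{
		unsigned int cpu;

		/* Trivial spread: software context (CPU) -> hardware queue. */
		for_each_possible_cpu(cpu)
			set->mq_map[cpu] = cpu % set->nr_hw_queues;

		return 0;
	}

	static struct blk_mq_ops my_mq_ops = {
		.queue_rq	= my_queue_rq,		/* hypothetical, unchanged by this series */
		.map_queues	= my_map_queues,	/* new op; left NULL, the core uses its default map */
	};

Per the patch list above, a PCI driver would more typically call the new PCI helper from .map_queues so the queue map follows the IRQ affinity spread; see the sketch after the diff.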
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--  include/linux/blk-mq.h  |  12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 5daa0ef756dd..ef6aebf291ed 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -62,6 +62,7 @@ struct blk_mq_hw_ctx {
 };
 
 struct blk_mq_tag_set {
+	unsigned int		*mq_map;
 	struct blk_mq_ops	*ops;
 	unsigned int		nr_hw_queues;
 	unsigned int		queue_depth;	/* max hw supported */
@@ -85,7 +86,6 @@ struct blk_mq_queue_data {
 };
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -99,6 +99,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 		bool);
 typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
 
 
 struct blk_mq_ops {
@@ -108,11 +109,6 @@ struct blk_mq_ops {
 	queue_rq_fn		*queue_rq;
 
 	/*
-	 * Map to specific hardware queue
-	 */
-	map_queue_fn		*map_queue;
-
-	/*
 	 * Called on request timeout
 	 */
 	timeout_fn		*timeout;
@@ -144,6 +140,8 @@ struct blk_mq_ops {
 	init_request_fn		*init_request;
 	exit_request_fn		*exit_request;
 	reinit_request_fn	*reinit_request;
+
+	map_queues_fn		*map_queues;
 };
 
 enum {
@@ -199,7 +197,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
 		unsigned int flags, unsigned int hctx_idx);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
 
 enum {
 	BLK_MQ_UNIQUE_TAG_BITS	= 16,
@@ -218,7 +215,6 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
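
For context, a hedged sketch (an assumption, not anything shown in this diff) of how a PCI driver could share its queue map with the IRQ affinity spread, in the spirit of the "default queue mapping for PCI device" and pci_alloc_irq_vectors patches in this pull. The my_* names and the driver_data layout are hypothetical.

	#include <linux/blk-mq-pci.h>
	#include <linux/pci.h>

	struct my_dev {
		struct pci_dev		*pdev;
		struct blk_mq_tag_set	tag_set;
	};

	/*
	 * Allocate MSI-X vectors with an affinity spread, then reuse that
	 * spread for the blk-mq queue map via the PCI helper from this series.
	 */
	static int my_setup_queues(struct my_dev *dev, unsigned int nr_hw_queues)
	{
		int nr_vecs;

		nr_vecs = pci_alloc_irq_vectors(dev->pdev, 1, nr_hw_queues,
						PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (nr_vecs < 0)
			return nr_vecs;

		dev->tag_set.nr_hw_queues = nr_vecs;
		return 0;
	}

	static int my_pci_map_queues(struct blk_mq_tag_set *set)
	{
		struct my_dev *dev = set->driver_data;

		/* Queue map follows the vectors' affinity masks. */
		return blk_mq_pci_map_queues(set, dev->pdev);
	}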