Diffstat (limited to 'include')
-rw-r--r--   include/linux/blk-mq.h   34
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 29c1a6e83814..a4ea0ce83b07 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -33,8 +33,6 @@ struct blk_mq_hw_ctx {
 	unsigned int		nr_ctx_map;
 	unsigned long		*ctx_map;
 
-	struct request		**rqs;
-	struct list_head	page_list;
 	struct blk_mq_tags	*tags;
 
 	unsigned long		queued;
@@ -42,7 +40,6 @@ struct blk_mq_hw_ctx {
 #define BLK_MQ_MAX_DISPATCH_ORDER	10
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
-	unsigned int		queue_depth;
 	unsigned int		numa_node;
 	unsigned int		cmd_size;	/* per-request extra data */
 
@@ -50,7 +47,7 @@ struct blk_mq_hw_ctx {
 	struct kobject		kobj;
 };
 
-struct blk_mq_reg {
+struct blk_mq_tag_set {
 	struct blk_mq_ops	*ops;
 	unsigned int		nr_hw_queues;
 	unsigned int		queue_depth;
@@ -59,18 +56,22 @@ struct blk_mq_reg {
 	int			numa_node;
 	unsigned int		timeout;
 	unsigned int		flags;		/* BLK_MQ_F_* */
+	void			*driver_data;
+
+	struct blk_mq_tags	**tags;
 };
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
-typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int);
+typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
+		unsigned int);
 typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(void *, struct blk_mq_hw_ctx *,
-		struct request *, unsigned int);
-typedef void (exit_request_fn)(void *, struct blk_mq_hw_ctx *,
-		struct request *, unsigned int);
+typedef int (init_request_fn)(void *, struct request *, unsigned int,
+		unsigned int, unsigned int);
+typedef void (exit_request_fn)(void *, struct request *, unsigned int,
+		unsigned int);
 
 struct blk_mq_ops {
 	/*
@@ -127,10 +128,13 @@ enum {
 	BLK_MQ_MAX_DEPTH	= 2048,
 };
 
-struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
+struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 
+int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
+
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
 void blk_mq_insert_request(struct request *, bool, bool, bool);
@@ -139,10 +143,10 @@ void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
-struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
+struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int);
 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
 bool blk_mq_end_io_partial(struct request *rq, int error,
@@ -173,12 +177,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 	return (void *) rq + sizeof(*rq);
 }
 
-static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
-					       unsigned int tag)
-{
-	return hctx->rqs[tag];
-}
-
 #define queue_for_each_hw_ctx(q, hctx, i)				\
 	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
 	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
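
For reference, the interface change in this header replaces the per-registration struct blk_mq_reg with a long-lived struct blk_mq_tag_set that owns the tag and request storage (hence the removal of hctx->rqs and the old inline blk_mq_tag_to_rq()) and that is allocated once with blk_mq_alloc_tag_set() before being handed to blk_mq_init_queue(). The sketch below shows how a driver could plausibly use the new calls; it is not taken from this commit. The my_dev, my_cmd, my_queue_rq, my_init_request and my_dev_init_queue names are hypothetical, the queue depth and flags are illustrative values, the init_request parameters are read as (driver_data, rq, hctx_idx, request_idx, numa_node), and the completion path assumes the blk_mq_end_io() helper of this era.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/string.h>

/* Hypothetical driver state; not part of this commit. */
struct my_dev {
	struct blk_mq_tag_set	tag_set;
	struct request_queue	*queue;
};

/* Per-request driver data, carved out of each request via cmd_size. */
struct my_cmd {
	int	flags;
};

static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	/* Complete immediately; a real driver would issue the command here. */
	blk_mq_end_io(rq, 0);
	return BLK_MQ_RQ_QUEUE_OK;
}

/* New init_request signature from this diff. */
static int my_init_request(void *data, struct request *rq,
			   unsigned int hctx_idx, unsigned int request_idx,
			   unsigned int numa_node)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->flags = 0;
	return 0;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= my_init_request,
};

static int my_dev_init_queue(struct my_dev *dev)
{
	int ret;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops		= &my_mq_ops;
	dev->tag_set.nr_hw_queues	= 1;
	dev->tag_set.queue_depth	= 64;	/* illustrative value */
	dev->tag_set.numa_node		= NUMA_NO_NODE;
	dev->tag_set.cmd_size		= sizeof(struct my_cmd);
	dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.driver_data	= dev;

	/* Tags and preallocated requests now live in the set ... */
	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	/* ... and blk_mq_init_queue() takes the set instead of a blk_mq_reg. */
	dev->queue = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->queue)) {
		ret = PTR_ERR(dev->queue);
		blk_mq_free_tag_set(&dev->tag_set);
		return ret;
	}
	return 0;
}

static void my_dev_exit_queue(struct my_dev *dev)
{
	blk_cleanup_queue(dev->queue);
	blk_mq_free_tag_set(&dev->tag_set);
}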