author    Jens Axboe <axboe@fb.com>    2017-01-17 08:03:22 -0500
committer Jens Axboe <axboe@fb.com>    2017-01-17 12:04:20 -0500
commit    bd166ef183c263c5ced656d49ef19c7da4adc774 (patch)
tree      449bbd3b4e671b370b96e3846b2281116e7089e9 /block/blk-mq-sched.h
parent    2af8cbe30531eca73c8f3ba277f155fc0020b01a (diff)
blk-mq-sched: add framework for MQ capable IO schedulers
This adds a set of hooks that intercepts the blk-mq path of
allocating/inserting/issuing/completing requests, allowing us to develop a
scheduler within that framework.

We reuse the existing elevator scheduler API on the registration side, but
augment that with the scheduler flagging support for the blk-mq interface,
and with a separate set of ops hooks for MQ devices.

We split driver and scheduler tags, so we can run the scheduling
independently of device queue depth.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
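For context, a minimal sketch of what the registration side of such an MQ
scheduler could look like. The ops.mq callback names match what the header
below dereferences; the toy FIFO scheduler, its per-queue state, and the
module boilerplate are hypothetical illustrations, not code from this commit.

    /*
     * Illustrative only -- not part of this commit. A real scheduler would
     * also provide init/exit hooks to allocate and free toy_data; omitted
     * here for brevity.
     */
    #include <linux/module.h>
    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>
    #include <linux/elevator.h>

    struct toy_data {                       /* hypothetical per-queue state */
            spinlock_t lock;
            struct list_head fifo;
    };

    static void toy_insert_requests(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list, bool at_head)
    {
            struct toy_data *td = hctx->queue->elevator->elevator_data;

            spin_lock(&td->lock);
            if (at_head)
                    list_splice_init(list, &td->fifo);
            else
                    list_splice_tail_init(list, &td->fifo);
            spin_unlock(&td->lock);
    }

    static bool toy_has_work(struct blk_mq_hw_ctx *hctx)
    {
            struct toy_data *td = hctx->queue->elevator->elevator_data;

            return !list_empty_careful(&td->fifo);
    }

    static struct elevator_type toy_sched = {
            .ops.mq = {
                    .insert_requests        = toy_insert_requests,
                    .has_work               = toy_has_work,
            },
            .elevator_name  = "toy",
            .elevator_owner = THIS_MODULE,
    };

    static int __init toy_init(void)
    {
            return elv_register(&toy_sched);
    }
    module_init(toy_init);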
Diffstat (limited to 'block/blk-mq-sched.h')
-rw-r--r--   block/blk-mq-sched.h   170
1 file changed, 170 insertions(+), 0 deletions(-)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
new file mode 100644
index 000000000000..35c49e2e008a
--- /dev/null
+++ b/block/blk-mq-sched.h
@@ -0,0 +1,170 @@
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
                                int (*init)(struct blk_mq_hw_ctx *),
                                void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_free_hctx_data(struct request_queue *q,
                                 void (*exit)(struct blk_mq_hw_ctx *));

struct request *blk_mq_sched_get_request(struct request_queue *q,
                                         struct bio *bio, unsigned int op,
                                         struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);

void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
                                   struct list_head *rq_list,
                                   struct request *(*get_rq)(struct blk_mq_hw_ctx *));

int blk_mq_sched_setup(struct request_queue *q);
void blk_mq_sched_teardown(struct request_queue *q);

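/*
 * The helpers below are inlined into the blk-mq fast path. Each one checks
 * for an attached scheduler (q->elevator) and falls back to the plain
 * blk-mq behaviour when none is configured.
 */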
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
                return false;

        return __blk_mq_sched_bio_merge(q, bio);
}

static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
                                           struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.get_rq_priv)
                return e->type->ops.mq.get_rq_priv(q, rq);

        return 0;
}

static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
                                            struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.put_rq_priv)
                e->type->ops.mq.put_rq_priv(q, rq);
}

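/*
 * Insertion: hand requests to the scheduler's insert_requests hook when one
 * is provided, otherwise place them directly on the software queue as plain
 * blk-mq would.
 */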
static inline void
blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue,
                            bool async)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

        if (e && e->type->ops.mq.insert_requests) {
                LIST_HEAD(list);

                list_add(&rq->queuelist, &list);
                e->type->ops.mq.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}

static inline void
blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx,
                             struct list_head *list, bool run_queue_async)
{
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.insert_requests)
                e->type->ops.mq.insert_requests(hctx, list, false);
        else
                blk_mq_insert_requests(hctx, ctx, list);

        blk_mq_run_hw_queue(hctx, run_queue_async);
}

static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);

        return true;
}

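/*
 * Completion: notify the scheduler, then release the scheduler-side tag
 * (rq->internal_tag, split from the driver tag) and rerun the hardware
 * queue if dispatch previously marked it for restart.
 */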
static inline void
blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.completed_request)
                e->type->ops.mq.completed_request(hctx, rq);

        BUG_ON(rq->internal_tag == -1);

        blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);

        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
                clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
                blk_mq_run_hw_queue(hctx, true);
        }
}

static inline void blk_mq_sched_started_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.started_request)
                e->type->ops.mq.started_request(rq);
}

static inline void blk_mq_sched_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.requeue_request)
                e->type->ops.mq.requeue_request(rq);
}

static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.has_work)
                return e->type->ops.mq.has_work(hctx);

        return false;
}

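/*
 * Restart handling: dispatch sets BLK_MQ_S_SCHED_RESTART when it cannot
 * make progress (e.g. no driver tags are available); the completion path
 * above checks and clears the bit, rerunning the queue instead of having
 * dispatch busy-poll.
 */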
static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif
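As a reading aid, a rough sketch of a request's path through the hooks above.
This is not code from the commit: example_submit() is hypothetical, error
handling is omitted, and the real callers live in blk-mq.c.

    #include <linux/blkdev.h>
    #include "blk-mq-sched.h"

    static void example_submit(struct request_queue *q, struct bio *bio)
    {
            struct blk_mq_alloc_data data = { .q = q };
            struct request *rq;

            /* Allocation goes through the scheduler, which hands out a
             * scheduler tag (rq->internal_tag) independent of driver tags. */
            rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
            if (!rq)
                    return;

            /* Hand the request to the scheduler if one is attached, else
             * place it on the software queue; then kick the hardware queue. */
            blk_mq_sched_insert_request(rq, false, true, false);

            /*
             * Dispatch later pulls requests in via
             * blk_mq_sched_dispatch_requests(); completion releases the
             * scheduler tag and reruns the queue if it was marked for
             * restart (see blk_mq_sched_completed_request() above).
             */
    }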