Diffstat (limited to 'block/blk-mq-sched.c')

-rw-r--r--  block/blk-mq-sched.c | 158
 1 file changed, 65 insertions(+), 93 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0ded5e846335..7f0dc48ffb40 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -31,11 +31,10 @@ void blk_mq_sched_free_hctx_data(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
 
-static void __blk_mq_sched_assign_ioc(struct request_queue *q,
-				      struct request *rq,
-				      struct bio *bio,
-				      struct io_context *ioc)
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 {
+	struct request_queue *q = rq->q;
+	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq;
 
 	spin_lock_irq(q->queue_lock);
@@ -47,25 +46,8 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 		if (!icq)
 			return;
 	}
-
+	get_io_context(icq->ioc);
 	rq->elv.icq = icq;
-	if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
-		rq->rq_flags |= RQF_ELVPRIV;
-		get_io_context(icq->ioc);
-		return;
-	}
-
-	rq->elv.icq = NULL;
-}
-
-static void blk_mq_sched_assign_ioc(struct request_queue *q,
-				    struct request *rq, struct bio *bio)
-{
-	struct io_context *ioc;
-
-	ioc = rq_ioc(bio);
-	if (ioc)
-		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
 /*
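In the two hunks above, the __blk_mq_sched_assign_ioc()/blk_mq_sched_assign_ioc() pair is folded into a single exported helper that derives the queue and io_context from the request and bio themselves, so callers no longer pass them in. Below is a minimal sketch of the new calling convention; the wrapper function is hypothetical and simply mirrors the op_is_flush()/icq_cache checks that the old blk_mq_sched_get_request() (removed in a later hunk) performed around the call.

/*
 * Sketch only: illustrates the new blk_mq_sched_assign_ioc(rq, bio)
 * calling convention after the consolidation above. The wrapper is
 * hypothetical; the real call site lives outside this file and needs
 * the block-layer private headers (block/blk-mq-sched.h).
 */
static void example_assign_ioc(struct request *rq, struct bio *bio,
			       struct elevator_queue *e, unsigned int op)
{
	if (op_is_flush(op))
		return;			/* flush requests carry no icq */

	rq->elv.icq = NULL;
	if (e && e->type->icq_cache)
		blk_mq_sched_assign_ioc(rq, bio);	/* was (q, rq, bio) */
}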
@@ -107,71 +89,6 @@ static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 	return false;
 }
 
-struct request *blk_mq_sched_get_request(struct request_queue *q,
-					 struct bio *bio,
-					 unsigned int op,
-					 struct blk_mq_alloc_data *data)
-{
-	struct elevator_queue *e = q->elevator;
-	struct request *rq;
-
-	blk_queue_enter_live(q);
-	data->q = q;
-	if (likely(!data->ctx))
-		data->ctx = blk_mq_get_ctx(q);
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
-
-	if (e) {
-		data->flags |= BLK_MQ_REQ_INTERNAL;
-
-		/*
-		 * Flush requests are special and go directly to the
-		 * dispatch list.
-		 */
-		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
-			rq = e->type->ops.mq.get_request(q, op, data);
-			if (rq)
-				rq->rq_flags |= RQF_QUEUED;
-		} else
-			rq = __blk_mq_alloc_request(data, op);
-	} else {
-		rq = __blk_mq_alloc_request(data, op);
-	}
-
-	if (rq) {
-		if (!op_is_flush(op)) {
-			rq->elv.icq = NULL;
-			if (e && e->type->icq_cache)
-				blk_mq_sched_assign_ioc(q, rq, bio);
-		}
-		data->hctx->queued++;
-		return rq;
-	}
-
-	blk_queue_exit(q);
-	return NULL;
-}
-
-void blk_mq_sched_put_request(struct request *rq)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-
-	if (rq->rq_flags & RQF_ELVPRIV) {
-		blk_mq_sched_put_rq_priv(rq->q, rq);
-		if (rq->elv.icq) {
-			put_io_context(rq->elv.icq->ioc);
-			rq->elv.icq = NULL;
-		}
-	}
-
-	if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
-		e->type->ops.mq.put_request(rq);
-	else
-		blk_mq_finish_request(rq);
-}
-
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
 	struct request_queue *q = hctx->queue;
@@ -180,7 +97,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	bool did_work = false;
 	LIST_HEAD(rq_list);
 
-	if (unlikely(blk_mq_hctx_stopped(hctx)))
+	/* RCU or SRCU read lock is needed before checking quiesced flag */
+	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
 		return;
 
 	hctx->run++;
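The added check makes dispatch a no-op while the queue is quiesced, and the new comment spells out that the caller must be inside an RCU or SRCU read-side section so the test cannot race with a concurrent quiesce. The sketch below shows the driver-side pattern this guards against: the quiesce/unquiesce calls are the real blk-mq API, while the surrounding function is illustrative only.

/*
 * Illustrative only: a driver quiesces a queue around a state change so
 * no new dispatches run concurrently. blk_mq_quiesce_queue() waits for
 * RCU/SRCU readers to drain, which is why the dispatch path above must
 * test blk_queue_quiesced() under the RCU or SRCU read lock.
 */
#include <linux/blk-mq.h>

static void example_reconfigure(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);	/* block new ->queue_rq() dispatch */
	/* ... update driver state that dispatch must not observe ... */
	blk_mq_unquiesce_queue(q);	/* resume dispatch */
}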
@@ -260,19 +178,73 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
 
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+				 struct blk_mq_ctx *ctx, struct bio *bio)
+{
+	struct request *rq;
+	int checked = 8;
+
+	lockdep_assert_held(&ctx->lock);
+
+	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+		bool merged = false;
+
+		if (!checked--)
+			break;
+
+		if (!blk_rq_merge_ok(rq, bio))
+			continue;
+
+		switch (blk_try_merge(rq, bio)) {
+		case ELEVATOR_BACK_MERGE:
+			if (blk_mq_sched_allow_merge(q, rq, bio))
+				merged = bio_attempt_back_merge(q, rq, bio);
+			break;
+		case ELEVATOR_FRONT_MERGE:
+			if (blk_mq_sched_allow_merge(q, rq, bio))
+				merged = bio_attempt_front_merge(q, rq, bio);
+			break;
+		case ELEVATOR_DISCARD_MERGE:
+			merged = bio_attempt_discard_merge(q, rq, bio);
+			break;
+		default:
+			continue;
+		}
+
+		if (merged)
+			ctx->rq_merged++;
+		return merged;
+	}
+
+	return false;
+}
+
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	bool ret = false;
 
-	if (e->type->ops.mq.bio_merge) {
-		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-		struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-
+	if (e && e->type->ops.mq.bio_merge) {
 		blk_mq_put_ctx(ctx);
 		return e->type->ops.mq.bio_merge(hctx, bio);
 	}
 
-	return false;
+	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
+		/* default per sw-queue merge */
+		spin_lock(&ctx->lock);
+		ret = blk_mq_attempt_merge(q, ctx, bio);
+		spin_unlock(&ctx->lock);
+	}
+
+	blk_mq_put_ctx(ctx);
+	return ret;
 }
 
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
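The fallback path added above only runs when no elevator supplies ->bio_merge() and the hardware queue carries BLK_MQ_F_SHOULD_MERGE. A driver opts in through its tag set; the sketch below uses the standard blk_mq_tag_set fields, with the ops pointer and the depth/queue counts as placeholders rather than values from any real driver.

/*
 * Sketch: opting a queue into software-queue bio merging. With this flag
 * set and no scheduler attached, __blk_mq_sched_bio_merge() above falls
 * back to blk_mq_attempt_merge() on the per-CPU ctx list.
 */
#include <linux/blk-mq.h>
#include <linux/numa.h>
#include <linux/string.h>

static int example_init_tag_set(struct blk_mq_tag_set *set,
				const struct blk_mq_ops *ops)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;				/* placeholder driver ops */
	set->nr_hw_queues = 1;
	set->queue_depth = 64;
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE;	/* enable sw-queue merges */

	return blk_mq_alloc_tag_set(set);
}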