Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c | 58
1 file changed, 58 insertions, 0 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4139b07ab33b..1112752f888d 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -335,6 +335,64 @@ void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
 	}
 }
 
+/*
+ * Add flush/fua to the queue. If we fail getting a driver tag, then
+ * punt to the requeue list. Requeue will re-invoke us from a context
+ * that's safe to block from.
+ */
+static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
+				      struct request *rq, bool can_block)
+{
+	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
+		blk_insert_flush(rq);
+		blk_mq_run_hw_queue(hctx, true);
+	} else
+		blk_mq_add_to_requeue_list(rq, true, true);
+}
+
+void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+				 bool run_queue, bool async, bool can_block)
+{
+	struct request_queue *q = rq->q;
+	struct elevator_queue *e = q->elevator;
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+
+	if (rq->tag == -1 && (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))) {
+		blk_mq_sched_insert_flush(hctx, rq, can_block);
+		return;
+	}
+
+	if (e && e->type->ops.mq.insert_requests) {
+		LIST_HEAD(list);
+
+		list_add(&rq->queuelist, &list);
+		e->type->ops.mq.insert_requests(hctx, &list, at_head);
+	} else {
+		spin_lock(&ctx->lock);
+		__blk_mq_insert_request(hctx, rq, at_head);
+		spin_unlock(&ctx->lock);
+	}
+
+	if (run_queue)
+		blk_mq_run_hw_queue(hctx, async);
+}
+
+void blk_mq_sched_insert_requests(struct request_queue *q,
+				  struct blk_mq_ctx *ctx,
+				  struct list_head *list, bool run_queue_async)
+{
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct elevator_queue *e = hctx->queue->elevator;
+
+	if (e && e->type->ops.mq.insert_requests)
+		e->type->ops.mq.insert_requests(hctx, list, false);
+	else
+		blk_mq_insert_requests(hctx, ctx, list);
+
+	blk_mq_run_hw_queue(hctx, run_queue_async);
+}
+
 static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
 				   struct blk_mq_hw_ctx *hctx,
 				   unsigned int hctx_idx)
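For orientation, a minimal sketch of how a caller might use the blk_mq_sched_insert_request() entry point added by this hunk. The helper name example_reinsert and the particular argument choices are assumptions for illustration only, not part of the patch:

#include <linux/blk-mq.h>
#include "blk-mq-sched.h"	/* internal block-layer header declaring the insert helpers */

/*
 * Illustrative only (hypothetical call site): push a request through the
 * scheduler insert path at the head of the queue, without running the
 * hardware queue from here, and allow blocking while getting a driver tag.
 */
static void example_reinsert(struct request *rq)
{
	blk_mq_sched_insert_request(rq, true /* at_head */,
				    false /* run_queue */,
				    false /* async */,
				    true /* can_block */);
}

The can_block argument matters for the flush/FUA path above: per the comment in the hunk, if a driver tag cannot be obtained the request is punted to the requeue list, and the requeue work re-invokes the insert from a context that is safe to block from.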