Diffstat (limited to 'block/blk-mq-sched.h')
-rw-r--r-- | block/blk-mq-sched.h | 45
1 file changed, 6 insertions, 39 deletions
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index becbc7840364..9478aaeb48c5 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -21,6 +21,12 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
 
+void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+				 bool run_queue, bool async, bool can_block);
+void blk_mq_sched_insert_requests(struct request_queue *q,
+				  struct blk_mq_ctx *ctx,
+				  struct list_head *list, bool run_queue_async);
+
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 				   struct list_head *rq_list,
@@ -62,45 +68,6 @@ static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
 	e->type->ops.mq.put_rq_priv(q, rq);
 }
 
-static inline void
-blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue,
-			    bool async)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-
-	if (e && e->type->ops.mq.insert_requests) {
-		LIST_HEAD(list);
-
-		list_add(&rq->queuelist, &list);
-		e->type->ops.mq.insert_requests(hctx, &list, at_head);
-	} else {
-		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
-		spin_unlock(&ctx->lock);
-	}
-
-	if (run_queue)
-		blk_mq_run_hw_queue(hctx, async);
-}
-
-static inline void
-blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx,
-			     struct list_head *list, bool run_queue_async)
-{
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-	struct elevator_queue *e = hctx->queue->elevator;
-
-	if (e && e->type->ops.mq.insert_requests)
-		e->type->ops.mq.insert_requests(hctx, list, false);
-	else
-		blk_mq_insert_requests(hctx, ctx, list);
-
-	blk_mq_run_hw_queue(hctx, run_queue_async);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 			 struct bio *bio)
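
For context, a minimal caller sketch (not part of this commit; the call site and argument choices are assumed for illustration): the two helpers are now declared out of line, and blk_mq_sched_insert_request() grows a fifth can_block argument, which presumably tells the insert path whether it is allowed to block.

	/*
	 * Hypothetical caller sketch, not taken from this diff: insert one
	 * request through the I/O scheduler and run the hardware queue
	 * synchronously.  The trailing argument is the new can_block flag;
	 * a caller that must not sleep would pass false instead.
	 */
	blk_mq_sched_insert_request(rq, false /* at_head */,
				    true /* run_queue */, false /* async */,
				    true /* can_block */);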