author	Jens Axboe <axboe@kernel.dk>	2018-11-01 18:41:41 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-11-07 15:42:32 -0500
commit	f9cd4bfe96955e7a1d3ec54b393dee87b815ba3b (patch)
tree	5d3e5dcd72b1ddbad1b592dfbbf1fa22f92fa6d2 /block/blk-mq-sched.h
parent	a1ce35fa49852db60fc6e268038530be533c5b15 (diff)
block: get rid of MQ scheduler ops union
This is a remnant of when we had ops for both SQ and MQ
schedulers. Now it's just MQ, so get rid of the union.
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
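The union being removed is the ops member of struct elevator_type in include/linux/elevator.h; that part of the patch falls outside this diffstat, which is limited to block/blk-mq-sched.h. As a rough sketch only (members trimmed, stub types added, and the _before/_after names are labels for this illustration rather than kernel identifiers), the change amounts to:

/* Sketch only -- the real definitions live in include/linux/elevator.h
 * and carry many more hooks than shown here. */
#include <stdbool.h>

typedef unsigned long long u64;	/* stand-in for the kernel's u64 */

struct request_queue;
struct request;
struct bio;
struct blk_mq_hw_ctx;

struct elevator_mq_ops {
	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
	void (*completed_request)(struct request *, u64);
	void (*started_request)(struct request *);
	void (*requeue_request)(struct request *);
	bool (*has_work)(struct blk_mq_hw_ctx *);
	/* ... */
};

/* Before the patch: the MQ ops sat in a one-member union, left over
 * from when a legacy (SQ) ops struct shared the slot, so callers had
 * to spell e->type->ops.mq.xxx. */
struct elevator_type_before {
	union {
		struct elevator_mq_ops mq;
	} ops;
};

/* After the patch: the union is gone, so callers, like the helpers in
 * blk-mq-sched.h below, spell e->type->ops.xxx directly. */
struct elevator_type_after {
	struct elevator_mq_ops ops;
};

With the intermediate .mq step gone, each helper in the hunks below changes only its two ops accesses.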
Diffstat (limited to 'block/blk-mq-sched.h')
-rw-r--r--	block/blk-mq-sched.h	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 8a9544203173..947f236b273d 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -43,8 +43,8 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e && e->type->ops.mq.allow_merge)
-		return e->type->ops.mq.allow_merge(q, rq, bio);
+	if (e && e->type->ops.allow_merge)
+		return e->type->ops.allow_merge(q, rq, bio);
 
 	return true;
 }
@@ -53,8 +53,8 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
 {
 	struct elevator_queue *e = rq->q->elevator;
 
-	if (e && e->type->ops.mq.completed_request)
-		e->type->ops.mq.completed_request(rq, now);
+	if (e && e->type->ops.completed_request)
+		e->type->ops.completed_request(rq, now);
 }
 
 static inline void blk_mq_sched_started_request(struct request *rq)
@@ -62,8 +62,8 @@ static inline void blk_mq_sched_started_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 
-	if (e && e->type->ops.mq.started_request)
-		e->type->ops.mq.started_request(rq);
+	if (e && e->type->ops.started_request)
+		e->type->ops.started_request(rq);
 }
 
 static inline void blk_mq_sched_requeue_request(struct request *rq)
@@ -71,16 +71,16 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 
-	if (e && e->type->ops.mq.requeue_request)
-		e->type->ops.mq.requeue_request(rq);
+	if (e && e->type->ops.requeue_request)
+		e->type->ops.requeue_request(rq);
 }
 
 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 {
 	struct elevator_queue *e = hctx->queue->elevator;
 
-	if (e && e->type->ops.mq.has_work)
-		return e->type->ops.mq.has_work(hctx);
+	if (e && e->type->ops.has_work)
+		return e->type->ops.has_work(hctx);
 
 	return false;
 }