author     Christoph Hellwig <hch@lst.de>    2017-10-03 04:47:00 -0400
committer  Jens Axboe <axboe@kernel.dk>      2017-10-03 10:43:04 -0400
commit     9c9883744dda1cc38339a448dd8435140537027e
tree       f12a16e699415343d5757d7b118dc59f6a295d3d /block/blk.h
parent     7cb04004fa371a626c1a5ebe6d977f70285759ed
block: move __elv_next_request to blk-core.c
No need to have this helper inline in a header. Also drop the __ prefix.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk.h')
 block/blk.h | 39 ---------------------------------------
 1 file changed, 0 insertions(+), 39 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index fcb9775b997d..fda5a4632aba 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -148,45 +148,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
 
 void blk_insert_flush(struct request *rq);
 
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
-	WARN_ON_ONCE(q->mq_ops);
-
-	while (1) {
-		if (!list_empty(&q->queue_head)) {
-			rq = list_entry_rq(q->queue_head.next);
-			return rq;
-		}
-
-		/*
-		 * Flush request is running and flush request isn't queueable
-		 * in the drive, we can hold the queue till flush request is
-		 * finished. Even we don't do this, driver can't dispatch next
-		 * requests and will requeue them. And this can improve
-		 * throughput too. For example, we have request flush1, write1,
-		 * flush 2. flush1 is dispatched, then queue is hold, write1
-		 * isn't inserted to queue. After flush1 is finished, flush2
-		 * will be dispatched. Since disk cache is already clean,
-		 * flush2 will be finished very soon, so looks like flush2 is
-		 * folded to flush1.
-		 * Since the queue is hold, a flag is set to indicate the queue
-		 * should be restarted later. Please see flush_end_io() for
-		 * details.
-		 */
-		if (fq->flush_pending_idx != fq->flush_running_idx &&
-				!queue_flush_queueable(q)) {
-			fq->flush_queue_delayed = 1;
-			return NULL;
-		}
-		if (unlikely(blk_queue_bypass(q)) ||
-		    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
-			return NULL;
-	}
-}
-
 static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
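
For reference, the helper as relocated to blk-core.c presumably looks like the sketch below. The body is reconstructed from the hunk removed above; the new name follows the commit message ("drop the __ prefix"), and the static, non-inline qualifiers are assumptions, since the blk-core.c side of the change is outside this view (the diffstat is limited to 'block/blk.h'). The long flush comment is condensed here; see the removed hunk for the full rationale.

/*
 * Sketch of the relocated helper in blk-core.c. Same body as the
 * hunk removed from block/blk.h above; "static" without "inline"
 * is assumed, per the commit message.
 */
static struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/* Legacy (single-queue) path only; never called for blk-mq. */
	WARN_ON_ONCE(q->mq_ops);

	while (1) {
		/* Dispatch list has work: return the request at its head. */
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush is in flight and the device can't queue another
		 * one: hold the queue and set a flag so flush_end_io()
		 * knows to restart it when the running flush completes.
		 */
		if (fq->flush_pending_idx != fq->flush_running_idx &&
		    !queue_flush_queueable(q)) {
			fq->flush_queue_delayed = 1;
			return NULL;
		}

		/*
		 * Ask the elevator to refill the dispatch list; give up if
		 * the queue is bypassing the elevator or nothing is ready.
		 */
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

With the helper out of the header, presumably its only caller is the legacy dispatch path in blk-core.c itself (blk_peek_request()), which is what makes the inline-in-a-header form unnecessary.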